2024-11-14 19:51:30,049 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4d14b6c2 2024-11-14 19:51:30,105 main DEBUG Took 0.051712 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-14 19:51:30,116 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-14 19:51:30,117 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-14 19:51:30,119 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-14 19:51:30,120 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 19:51:30,143 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-14 19:51:30,223 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,226 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 19:51:30,227 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,228 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 19:51:30,228 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,229 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 19:51:30,230 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,231 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 19:51:30,232 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,232 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 19:51:30,234 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,241 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 19:51:30,242 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,243 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-14 19:51:30,244 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,244 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 19:51:30,245 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,245 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 19:51:30,246 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,247 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 19:51:30,247 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,248 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 19:51:30,248 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,249 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 19:51:30,250 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,250 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-14 19:51:30,271 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 19:51:30,273 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-14 19:51:30,276 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-14 19:51:30,277 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
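
The createLoggers(...) entry above lists the per-package levels this run ends up with (for example org.apache.zookeeper at ERROR, org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG). Purely as a hedged reference, the same levels could be applied programmatically with the public Log4j 2 API as sketched below; this run instead loads them from the log4j2.properties bundled in the test jar, and the class name here is illustrative only.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class TestLogLevelsSketch {
        public static void main(String[] args) {
            // Illustrative only: mirrors the levels reported by createLoggers(...) above.
            Configurator.setRootLevel(Level.INFO);
            Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
            Configurator.setLevel("org.apache.hadoop", Level.WARN);
            Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
            Configurator.setLevel("org.apache.directory", Level.WARN);
        }
    }
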
2024-11-14 19:51:30,299 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-14 19:51:30,300 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-14 19:51:30,361 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-14 19:51:30,372 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-14 19:51:30,396 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-14 19:51:30,398 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-14 19:51:30,401 main DEBUG createAppenders(={Console}) 2024-11-14 19:51:30,403 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4d14b6c2 initialized 2024-11-14 19:51:30,404 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4d14b6c2 2024-11-14 19:51:30,405 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4d14b6c2 OK. 2024-11-14 19:51:30,406 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-14 19:51:30,407 main DEBUG OutputStream closed 2024-11-14 19:51:30,408 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-14 19:51:30,410 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-14 19:51:30,410 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@1de5f259 OK 2024-11-14 19:51:30,574 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-14 19:51:30,577 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-14 19:51:30,579 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-14 19:51:30,580 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-14 19:51:30,581 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-14 19:51:30,582 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-14 19:51:30,582 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-14 19:51:30,583 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-14 19:51:30,584 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-14 19:51:30,584 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-14 19:51:30,585 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-14 19:51:30,585 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-14 19:51:30,586 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-14 19:51:30,586 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-14 19:51:30,586 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-14 19:51:30,587 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-14 19:51:30,587 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-14 19:51:30,589 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-14 19:51:30,592 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-14 19:51:30,593 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@2766ca9d) with optional ClassLoader: null 2024-11-14 19:51:30,593 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-14 19:51:30,594 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@2766ca9d] started OK. 2024-11-14T19:51:31,318 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35 2024-11-14 19:51:31,324 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-14 19:51:31,333 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
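
At this point the Log4j context has been reconfigured from the log4j2.properties inside hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar: one appender named Console writing to SYSTEM_ERR with the pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n, plus an INFO root logger. As a hedged illustration only (the builder-API usage and class name are assumptions; the real run uses the properties file, and since HBaseTestAppender is an HBase-internal plugin a stock console appender stands in for it), an equivalent configuration could be built programmatically like this:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.appender.ConsoleAppender;
    import org.apache.logging.log4j.core.config.Configurator;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
    import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

    public class ConsolePatternConfigSketch {
        public static void main(String[] args) {
            ConfigurationBuilder<BuiltConfiguration> builder =
                ConfigurationBuilderFactory.newConfigurationBuilder();
            // Stand-in for HBaseTestAppender: a plain console appender on stderr
            // using the same pattern the run above reports.
            builder.add(builder.newAppender("Console", "Console")
                .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
                .add(builder.newLayout("PatternLayout")
                    .addAttribute("pattern",
                        "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")));
            // Root logger at INFO referencing the Console appender, as in the log.
            builder.add(builder.newRootLogger(Level.INFO)
                .add(builder.newAppenderRef("Console")));
            Configurator.initialize(builder.build());
        }
    }
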
2024-11-14T19:51:31,345 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-14T19:51:31,521 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=379, ProcessCount=11, AvailableMemoryMB=6314 2024-11-14T19:51:31,527 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T19:51:31,574 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/cluster_9a647fad-9f65-00ab-d065-18f3692b702a, deleteOnExit=true 2024-11-14T19:51:31,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T19:51:31,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/test.cache.data in system properties and HBase conf 2024-11-14T19:51:31,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T19:51:31,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/hadoop.log.dir in system properties and HBase conf 2024-11-14T19:51:31,578 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T19:51:31,579 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T19:51:31,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T19:51:31,765 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-14T19:51:31,967 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T19:51:31,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T19:51:31,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T19:51:31,991 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T19:51:31,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T19:51:31,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T19:51:31,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T19:51:32,001 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T19:51:32,002 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:51:32,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T19:51:32,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/nfs.dump.dir in system properties and HBase conf 2024-11-14T19:51:32,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/java.io.tmpdir in system properties and HBase conf 2024-11-14T19:51:32,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:51:32,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T19:51:32,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T19:51:32,993 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:51:33,854 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-14T19:51:34,026 INFO [Time-limited test {}] log.Log(170): Logging initialized @6071ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-14T19:51:34,226 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:51:34,406 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:51:34,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:51:34,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:51:34,521 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:51:34,574 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:51:34,610 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47a28521{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:51:34,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a7eb645{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:51:34,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27b94a93{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/java.io.tmpdir/jetty-localhost-46239-hadoop-hdfs-3_4_1-tests_jar-_-any-14106745154004880172/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:51:35,011 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@d38cdad{HTTP/1.1, (http/1.1)}{localhost:46239} 2024-11-14T19:51:35,012 INFO [Time-limited test {}] server.Server(415): Started @7058ms 2024-11-14T19:51:35,063 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:51:35,970 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:51:36,012 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:51:36,026 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:51:36,026 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:51:36,030 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:51:36,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79ee2b95{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:51:36,034 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b81322e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:51:36,216 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@fa036b2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/java.io.tmpdir/jetty-localhost-46797-hadoop-hdfs-3_4_1-tests_jar-_-any-8994201462117824722/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:51:36,218 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@19556135{HTTP/1.1, (http/1.1)}{localhost:46797} 2024-11-14T19:51:36,219 INFO [Time-limited test {}] server.Server(415): Started @8265ms 2024-11-14T19:51:36,338 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:51:36,909 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:51:36,921 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:51:36,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:51:36,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:51:36,989 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T19:51:36,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@644edc50{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:51:36,998 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@626bfd2d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:51:37,184 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77ee7cbe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/java.io.tmpdir/jetty-localhost-35539-hadoop-hdfs-3_4_1-tests_jar-_-any-4325021653172762059/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:51:37,189 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25aef1db{HTTP/1.1, (http/1.1)}{localhost:35539} 2024-11-14T19:51:37,189 INFO [Time-limited test {}] server.Server(415): Started @9235ms 2024-11-14T19:51:37,192 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
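
The startup above follows StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}: two HDFS datanodes behind jetty-9.4.53 HTTP endpoints come up first, with the single-node ZooKeeper and the HBase master and regionserver following below. A hedged sketch of the kind of setup code that produces such a startup is shown here; names like TEST_UTIL and the main-method framing are illustrative, and the actual setup in org.apache.hadoop.hbase.regionserver.wal.TestLogRolling may differ.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSetupSketch {
        // Illustrative utility instance; a real test typically wires this up in @BeforeClass.
        private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

        public static void main(String[] args) throws Exception {
            // Matches the option string logged above: 1 master, 1 regionserver,
            // 2 datanodes, 1 ZooKeeper server.
            StartMiniClusterOption option = StartMiniClusterOption.builder()
                .numMasters(1)
                .numRegionServers(1)
                .numDataNodes(2)
                .numZkServers(1)
                .build();
            TEST_UTIL.startMiniCluster(option);
            try {
                // ... test body would run against the mini cluster here ...
            } finally {
                TEST_UTIL.shutdownMiniCluster();
            }
        }
    }
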
2024-11-14T19:51:38,128 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/cluster_9a647fad-9f65-00ab-d065-18f3692b702a/data/data3/current/BP-1923608106-172.17.0.2-1731613893194/current, will proceed with Du for space computation calculation, 2024-11-14T19:51:38,147 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/cluster_9a647fad-9f65-00ab-d065-18f3692b702a/data/data1/current/BP-1923608106-172.17.0.2-1731613893194/current, will proceed with Du for space computation calculation, 2024-11-14T19:51:38,206 WARN [Thread-102 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/cluster_9a647fad-9f65-00ab-d065-18f3692b702a/data/data2/current/BP-1923608106-172.17.0.2-1731613893194/current, will proceed with Du for space computation calculation, 2024-11-14T19:51:38,214 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/cluster_9a647fad-9f65-00ab-d065-18f3692b702a/data/data4/current/BP-1923608106-172.17.0.2-1731613893194/current, will proceed with Du for space computation calculation, 2024-11-14T19:51:38,411 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T19:51:38,415 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T19:51:38,536 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab3a28330ab9c4a1 with lease ID 0x640617de42c82161: Processing first storage report for DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a from datanode DatanodeRegistration(127.0.0.1:35407, datanodeUuid=4fa82d60-be63-499e-80ab-102485d2f75e, infoPort=41791, infoSecurePort=0, ipcPort=40809, storageInfo=lv=-57;cid=testClusterID;nsid=654934222;c=1731613893194) 2024-11-14T19:51:38,538 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab3a28330ab9c4a1 with lease ID 0x640617de42c82161: from storage DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a node DatanodeRegistration(127.0.0.1:35407, datanodeUuid=4fa82d60-be63-499e-80ab-102485d2f75e, infoPort=41791, infoSecurePort=0, ipcPort=40809, storageInfo=lv=-57;cid=testClusterID;nsid=654934222;c=1731613893194), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-14T19:51:38,539 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7ca7195d1977ee18 with lease ID 0x640617de42c82160: Processing first storage report for DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b from datanode DatanodeRegistration(127.0.0.1:44629, datanodeUuid=cf114025-70b5-480e-beb6-8a9111a10378, infoPort=45091, infoSecurePort=0, ipcPort=33665, storageInfo=lv=-57;cid=testClusterID;nsid=654934222;c=1731613893194) 2024-11-14T19:51:38,539 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ca7195d1977ee18 with lease ID 0x640617de42c82160: from storage DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b node DatanodeRegistration(127.0.0.1:44629, datanodeUuid=cf114025-70b5-480e-beb6-8a9111a10378, infoPort=45091, infoSecurePort=0, ipcPort=33665, storageInfo=lv=-57;cid=testClusterID;nsid=654934222;c=1731613893194), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:51:38,539 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xab3a28330ab9c4a1 with lease ID 0x640617de42c82161: Processing first storage report for DS-577e2f6c-93a1-4cb0-88b0-99688d765290 from datanode DatanodeRegistration(127.0.0.1:35407, datanodeUuid=4fa82d60-be63-499e-80ab-102485d2f75e, infoPort=41791, infoSecurePort=0, ipcPort=40809, storageInfo=lv=-57;cid=testClusterID;nsid=654934222;c=1731613893194) 2024-11-14T19:51:38,540 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xab3a28330ab9c4a1 with lease ID 0x640617de42c82161: from storage DS-577e2f6c-93a1-4cb0-88b0-99688d765290 node DatanodeRegistration(127.0.0.1:35407, datanodeUuid=4fa82d60-be63-499e-80ab-102485d2f75e, infoPort=41791, infoSecurePort=0, ipcPort=40809, storageInfo=lv=-57;cid=testClusterID;nsid=654934222;c=1731613893194), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T19:51:38,540 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7ca7195d1977ee18 with lease ID 0x640617de42c82160: Processing first storage report for DS-ad35cfa0-2db7-4f4e-abdc-391051bc0d35 from datanode DatanodeRegistration(127.0.0.1:44629, datanodeUuid=cf114025-70b5-480e-beb6-8a9111a10378, infoPort=45091, infoSecurePort=0, ipcPort=33665, storageInfo=lv=-57;cid=testClusterID;nsid=654934222;c=1731613893194) 2024-11-14T19:51:38,540 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x7ca7195d1977ee18 with lease ID 0x640617de42c82160: from storage DS-ad35cfa0-2db7-4f4e-abdc-391051bc0d35 node DatanodeRegistration(127.0.0.1:44629, datanodeUuid=cf114025-70b5-480e-beb6-8a9111a10378, infoPort=45091, infoSecurePort=0, ipcPort=33665, storageInfo=lv=-57;cid=testClusterID;nsid=654934222;c=1731613893194), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:51:38,631 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35 2024-11-14T19:51:38,765 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/cluster_9a647fad-9f65-00ab-d065-18f3692b702a/zookeeper_0, clientPort=54518, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/cluster_9a647fad-9f65-00ab-d065-18f3692b702a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/cluster_9a647fad-9f65-00ab-d065-18f3692b702a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T19:51:38,783 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54518 2024-11-14T19:51:38,819 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:51:38,823 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:51:39,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:51:39,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:51:39,693 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f with version=8 2024-11-14T19:51:39,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/hbase-staging 2024-11-14T19:51:39,847 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-14T19:51:40,247 INFO [Time-limited test {}] client.ConnectionUtils(128): master/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:51:40,260 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:51:40,260 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:51:40,267 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:51:40,267 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:51:40,267 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:51:40,588 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T19:51:40,680 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-14T19:51:40,693 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-14T19:51:40,725 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:51:40,815 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 71188 (auto-detected) 2024-11-14T19:51:40,816 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-14T19:51:40,860 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44661 2024-11-14T19:51:40,893 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44661 connecting to ZooKeeper ensemble=127.0.0.1:54518 2024-11-14T19:51:41,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:446610x0, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:51:41,151 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44661-0x1013c14fd950000 connected 2024-11-14T19:51:41,353 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:51:41,363 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:51:41,386 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:51:41,406 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f, hbase.cluster.distributed=false 2024-11-14T19:51:41,457 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:51:41,481 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44661 2024-11-14T19:51:41,487 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44661 2024-11-14T19:51:41,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44661 2024-11-14T19:51:41,576 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44661 2024-11-14T19:51:41,578 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44661 2024-11-14T19:51:41,783 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:51:41,786 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:51:41,786 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:51:41,787 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:51:41,787 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:51:41,787 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:51:41,791 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T19:51:41,806 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:51:41,820 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45473 2024-11-14T19:51:41,823 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45473 connecting to ZooKeeper ensemble=127.0.0.1:54518 2024-11-14T19:51:41,825 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:51:41,829 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:51:41,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454730x0, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:51:41,887 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45473-0x1013c14fd950001 connected 2024-11-14T19:51:41,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:51:41,903 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T19:51:41,931 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T19:51:41,941 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T19:51:41,951 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:51:41,958 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45473 2024-11-14T19:51:41,959 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45473 2024-11-14T19:51:41,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45473 2024-11-14T19:51:41,981 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45473 2024-11-14T19:51:41,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45473 2024-11-14T19:51:42,009 DEBUG [M:0;867b237d0fa7:44661 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;867b237d0fa7:44661 2024-11-14T19:51:42,021 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/867b237d0fa7,44661,1731613899985 2024-11-14T19:51:42,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:51:42,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:51:42,055 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/867b237d0fa7,44661,1731613899985 2024-11-14T19:51:42,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:51:42,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T19:51:42,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-14T19:51:42,114 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T19:51:42,116 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/867b237d0fa7,44661,1731613899985 from backup master directory 2024-11-14T19:51:42,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/867b237d0fa7,44661,1731613899985 2024-11-14T19:51:42,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:51:42,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:51:42,132 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T19:51:42,132 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=867b237d0fa7,44661,1731613899985 2024-11-14T19:51:42,134 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-14T19:51:42,151 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-14T19:51:42,293 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/hbase.id] with ID: 6dd537b9-cdd2-4d00-8e06-7fd736fc6751 2024-11-14T19:51:42,293 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/.tmp/hbase.id 2024-11-14T19:51:42,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:51:42,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:51:42,761 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/.tmp/hbase.id]:[hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/hbase.id] 2024-11-14T19:51:42,917 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:51:42,923 INFO 
[master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T19:51:42,954 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 29ms. 2024-11-14T19:51:42,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:51:43,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:51:43,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:51:43,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:51:43,134 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T19:51:43,139 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T19:51:43,150 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:51:43,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:51:43,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:51:43,288 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store 2024-11-14T19:51:43,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:51:43,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:51:43,369 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-14T19:51:43,379 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:51:43,381 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:51:43,381 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:51:43,383 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:51:43,384 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:51:43,385 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
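
The master local region above uses table 'master:store' with four column families (info, proc, rs, state); info is kept in memory with a ROWCOL bloom filter, ROW_INDEX_V1 encoding and 8 KB blocks. That descriptor is assembled internally by the master; purely as a hedged illustration, an equivalent descriptor expressed with the public client API would look roughly like the sketch below (not how MasterRegion actually constructs it).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
        public static TableDescriptor build() {
            // 'info' family as reported above: 3 versions, ROW_INDEX_V1 encoding,
            // ROWCOL bloom filter, in-memory, 8 KB blocks.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setBlocksize(8 * 1024)
                    .build())
                // 'proc', 'rs' and 'state' keep the defaults shown in the log
                // (1 version, ROW bloom filter, 64 KB blocks).
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
                .build();
        }
    }
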
2024-11-14T19:51:43,385 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:51:43,386 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731613903381Disabling compacts and flushes for region at 1731613903381Disabling writes for close at 1731613903385 (+4 ms)Writing region close event to WAL at 1731613903385Closed at 1731613903385 2024-11-14T19:51:43,389 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/.initializing 2024-11-14T19:51:43,389 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/WALs/867b237d0fa7,44661,1731613899985 2024-11-14T19:51:43,431 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C44661%2C1731613899985, suffix=, logDir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/WALs/867b237d0fa7,44661,1731613899985, archiveDir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/oldWALs, maxLogs=10 2024-11-14T19:51:43,444 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C44661%2C1731613899985.1731613903437 2024-11-14T19:51:43,489 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/WALs/867b237d0fa7,44661,1731613899985/867b237d0fa7%2C44661%2C1731613899985.1731613903437 2024-11-14T19:51:43,526 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45091:45091),(127.0.0.1/127.0.0.1:41791:41791)] 2024-11-14T19:51:43,538 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:51:43,539 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:51:43,544 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:51:43,545 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:51:43,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:51:43,686 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T19:51:43,691 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:43,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:51:43,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:51:43,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T19:51:43,703 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:43,705 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:51:43,705 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:51:43,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T19:51:43,722 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:43,724 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:51:43,725 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:51:43,731 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T19:51:43,731 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:43,733 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:51:43,734 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:51:43,739 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:51:43,740 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:51:43,757 DEBUG [master/867b237d0fa7:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:51:43,758 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:51:43,762 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T19:51:43,773 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:51:43,783 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:51:43,785 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=855989, jitterRate=0.08844718337059021}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T19:51:43,796 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731613903567Initializing all the Stores at 1731613903574 (+7 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731613903575 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731613903577 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731613903578 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731613903578Cleaning up temporary data from old regions at 1731613903758 (+180 ms)Region opened successfully at 1731613903795 (+37 ms) 2024-11-14T19:51:43,797 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T19:51:43,850 DEBUG 
[master/867b237d0fa7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1679603c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:51:43,905 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T19:51:43,919 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T19:51:43,920 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T19:51:43,923 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T19:51:43,932 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 8 msec 2024-11-14T19:51:43,938 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec 2024-11-14T19:51:43,939 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T19:51:43,982 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T19:51:44,009 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T19:51:44,094 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T19:51:44,100 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T19:51:44,102 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T19:51:44,112 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T19:51:44,115 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T19:51:44,131 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T19:51:44,135 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T19:51:44,140 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T19:51:44,144 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T19:51:44,169 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T19:51:44,186 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T19:51:44,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:51:44,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:51:44,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:51:44,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:51:44,204 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=867b237d0fa7,44661,1731613899985, sessionid=0x1013c14fd950000, setting cluster-up flag (Was=false) 2024-11-14T19:51:44,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:51:44,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:51:44,279 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T19:51:44,282 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,44661,1731613899985 2024-11-14T19:51:44,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:51:44,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:51:44,361 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T19:51:44,365 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,44661,1731613899985 2024-11-14T19:51:44,373 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T19:51:44,417 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(746): ClusterId : 6dd537b9-cdd2-4d00-8e06-7fd736fc6751 2024-11-14T19:51:44,420 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T19:51:44,449 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T19:51:44,450 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T19:51:44,463 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T19:51:44,464 DEBUG [RS:0;867b237d0fa7:45473 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b9c0fbe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:51:44,493 DEBUG [RS:0;867b237d0fa7:45473 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;867b237d0fa7:45473 2024-11-14T19:51:44,497 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T19:51:44,498 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T19:51:44,498 DEBUG [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T19:51:44,507 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(2659): reportForDuty to master=867b237d0fa7,44661,1731613899985 with port=45473, startcode=1731613901697 2024-11-14T19:51:44,514 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T19:51:44,527 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T19:51:44,535 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
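The StochasticLoadBalancer entry above reports the knobs it loaded (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000). A minimal sketch of how such values would be supplied through the Hadoop Configuration object follows; the property keys are quoted from memory of the balancer's documented settings and should be treated as assumptions rather than values verified against this build.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property keys for the stochastic balancer; values match the log entry above.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        return conf;
      }
    }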
2024-11-14T19:51:44,535 DEBUG [RS:0;867b237d0fa7:45473 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T19:51:44,550 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 867b237d0fa7,44661,1731613899985 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T19:51:44,564 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:51:44,564 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:51:44,564 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:51:44,565 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:51:44,565 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/867b237d0fa7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T19:51:44,565 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:44,565 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:51:44,565 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:44,616 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:51:44,617 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T19:51:44,631 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:44,632 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T19:51:44,650 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731613934649 2024-11-14T19:51:44,654 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T19:51:44,656 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T19:51:44,654 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36683, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T19:51:44,663 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44661 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T19:51:44,677 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T19:51:44,677 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T19:51:44,679 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T19:51:44,679 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T19:51:44,693 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:44,700 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T19:51:44,716 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T19:51:44,716 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T19:51:44,729 DEBUG [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-14T19:51:44,729 WARN [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
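The two entries just above ("Master is not running yet" and "reportForDuty failed; sleeping 100 ms and then retrying.") show the region server's registration loop backing off until the master's RPC services come up. The following generic Java sketch illustrates that retry-with-fixed-sleep pattern; reportForDuty() here is a hypothetical stand-in for the real registration call, not HBase's method.

    public class ReportForDutyRetrySketch {
      /** Hypothetical stand-in for the registration RPC; throws while the master is not ready. */
      interface Registration { void reportForDuty() throws Exception; }

      static void registerWithRetry(Registration reg, long sleepMs) throws InterruptedException {
        while (true) {
          try {
            reg.reportForDuty();
            return; // registered successfully
          } catch (Exception serverNotRunningYet) {
            // Mirror the log above: sleep a fixed interval, then try again.
            Thread.sleep(sleepMs);
          }
        }
      }
    }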
2024-11-14T19:51:44,730 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T19:51:44,731 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T19:51:44,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:51:44,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:51:44,736 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731613904734,5,FailOnTimeoutGroup] 2024-11-14T19:51:44,741 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731613904737,5,FailOnTimeoutGroup] 2024-11-14T19:51:44,741 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:44,742 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T19:51:44,744 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:44,745 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
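Several of the entries above enable ScheduledChore instances with fixed periods (LogsCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms). As a rough sketch of that kind of periodic housekeeping, here is a plain java.util.concurrent version; it is a generic illustration of fixed-period scheduling, not HBase's ChoreService API.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class CleanerChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
        // Periods taken from the log entries above (milliseconds).
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("LogsCleaner pass"), 0, 600_000, TimeUnit.MILLISECONDS);
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("SnapshotCleaner pass"), 0, 1_800_000, TimeUnit.MILLISECONDS);
      }
    }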
2024-11-14T19:51:44,754 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T19:51:44,755 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f 2024-11-14T19:51:44,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:51:44,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:51:44,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:51:44,831 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(2659): reportForDuty to master=867b237d0fa7,44661,1731613899985 with port=45473, startcode=1731613901697 2024-11-14T19:51:44,834 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44661 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 867b237d0fa7,45473,1731613901697 2024-11-14T19:51:44,837 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44661 {}] master.ServerManager(517): Registering regionserver=867b237d0fa7,45473,1731613901697 2024-11-14T19:51:44,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:51:44,850 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:51:44,850 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:44,851 DEBUG [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f 2024-11-14T19:51:44,851 DEBUG [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38605 2024-11-14T19:51:44,851 DEBUG [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T19:51:44,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:51:44,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:51:44,871 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:51:44,871 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:44,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:51:44,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of 
region 1588230740 2024-11-14T19:51:44,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:51:44,889 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:51:44,889 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:44,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:51:44,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:51:44,891 DEBUG [RS:0;867b237d0fa7:45473 {}] zookeeper.ZKUtil(111): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/867b237d0fa7,45473,1731613901697 2024-11-14T19:51:44,891 WARN [RS:0;867b237d0fa7:45473 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
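The ZKUtil and RegionServerTracker entries above record the region server publishing itself under /hbase/rs and the master reacting to the NodeChildrenChanged event on that path. A minimal raw ZooKeeper sketch of the same ephemeral-node-plus-child-watch pattern follows; the quorum address and znode name are copied from the log, while the 30 s session timeout is an arbitrary choice and error handling is reduced to a throws clause.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class RsZNodeSketch {
      public static void main(String[] args) throws Exception {
        // Quorum address as reported in the log above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54518", 30_000,
            event -> System.out.println("ZK event: " + event));
        // Region server side: publish an ephemeral znode that disappears if the session dies.
        zk.create("/hbase/rs/867b237d0fa7,45473,1731613901697", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        // Master side: watch the parent so NodeChildrenChanged fires when servers come and go.
        zk.getChildren("/hbase/rs", true);
      }
    }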
2024-11-14T19:51:44,892 INFO [RS:0;867b237d0fa7:45473 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:51:44,892 DEBUG [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697 2024-11-14T19:51:44,893 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [867b237d0fa7,45473,1731613901697] 2024-11-14T19:51:44,895 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:51:44,895 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:44,896 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:51:44,897 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:51:44,899 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740 2024-11-14T19:51:44,901 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740 2024-11-14T19:51:44,911 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:51:44,911 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:51:44,913 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
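The FlushLargeStoresPolicy entries above (32.0 M for master:store, 16.0 M here for hbase:meta) come from a simple fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table, the per-family lower bound is the region's memstore flush size divided by the number of families. A small worked sketch using the master:store numbers visible in the log (flushSize=134217728 and four families: info, proc, rs, state):

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 134_217_728L; // 128 MiB, as injected for master:store above
        int numFamilies = 4;                   // info, proc, rs, state
        long perFamilyLowerBound = memstoreFlushSize / numFamilies;
        // Prints 33554432 (32 MiB), matching flushSizeLowerBound=33554432 in the log.
        System.out.println(perFamilyLowerBound);
      }
    }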
2024-11-14T19:51:44,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:51:44,928 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:51:44,930 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=883589, jitterRate=0.12354202568531036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:51:44,936 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731613904823Initializing all the Stores at 1731613904826 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731613904826Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731613904827 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731613904827Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731613904828 (+1 ms)Cleaning up temporary data from old regions at 1731613904911 (+83 ms)Region opened successfully at 1731613904936 (+25 ms) 2024-11-14T19:51:44,936 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:51:44,937 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:51:44,937 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:51:44,937 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T19:51:44,937 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T19:51:44,942 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T19:51:44,956 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:51:44,956 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close 
journal for 1588230740: Waiting for close lock at 1731613904936Disabling compacts and flushes for region at 1731613904936Disabling writes for close at 1731613904937 (+1 ms)Writing region close event to WAL at 1731613904955 (+18 ms)Closed at 1731613904956 (+1 ms) 2024-11-14T19:51:44,961 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:51:44,962 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T19:51:44,977 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T19:51:44,983 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T19:51:44,993 INFO [RS:0;867b237d0fa7:45473 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T19:51:44,993 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:44,996 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:51:45,001 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T19:51:45,012 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T19:51:45,026 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T19:51:45,029 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
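The MemStoreFlusher entry above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false) is consistent with the usual heap-fraction sizing: a global limit of roughly 40% of the heap and a low-water mark at 95% of that limit (836 = 880 * 0.95). The fractions and property names in the sketch below are the commonly documented defaults (hbase.regionserver.global.memstore.size and its .lower.limit companion) and should be read as assumptions about this particular build.

    public class GlobalMemstoreSizingSketch {
      public static void main(String[] args) {
        long maxHeapBytes = Runtime.getRuntime().maxMemory();
        double globalFraction = 0.4;  // assumed default for hbase.regionserver.global.memstore.size
        double lowerLimit = 0.95;     // assumed default for ...global.memstore.size.lower.limit
        long globalLimit = (long) (maxHeapBytes * globalFraction);
        long lowMark = (long) (globalLimit * lowerLimit);
        // With a heap of roughly 2.2 GB this yields about the 880 M / 836 M pair reported above.
        System.out.println(globalLimit + " / " + lowMark);
      }
    }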
2024-11-14T19:51:45,029 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:45,029 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:45,030 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:45,039 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:45,039 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:45,039 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:51:45,040 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:45,040 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:45,040 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:45,040 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:45,040 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:45,041 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:51:45,041 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:51:45,041 DEBUG [RS:0;867b237d0fa7:45473 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:51:45,048 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:45,048 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:45,049 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:45,049 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
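The executor.ExecutorService entries above start a set of named pools with explicit corePoolSize/maxPoolSize values (for example RS_LOG_REPLAY_OPS at 2/2 and RS_SNAPSHOT_OPERATIONS at 3/3). Below is a generic java.util.concurrent sketch of a bounded pool with those two knobs; it is not HBase's own ExecutorService wrapper.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class BoundedPoolSketch {
      public static ThreadPoolExecutor openRegionPool() {
        // corePoolSize=1, maxPoolSize=1, as for RS_OPEN_REGION above; idle core threads may time out.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true);
        return pool;
      }
    }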
2024-11-14T19:51:45,049 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:45,049 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,45473,1731613901697-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:51:45,098 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T19:51:45,102 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,45473,1731613901697-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:45,104 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:45,104 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.Replication(171): 867b237d0fa7,45473,1731613901697 started 2024-11-14T19:51:45,133 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:45,135 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(1482): Serving as 867b237d0fa7,45473,1731613901697, RpcServer on 867b237d0fa7/172.17.0.2:45473, sessionid=0x1013c14fd950001 2024-11-14T19:51:45,136 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T19:51:45,137 DEBUG [RS:0;867b237d0fa7:45473 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 867b237d0fa7,45473,1731613901697 2024-11-14T19:51:45,137 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,45473,1731613901697' 2024-11-14T19:51:45,137 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T19:51:45,146 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T19:51:45,148 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T19:51:45,149 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T19:51:45,149 DEBUG [RS:0;867b237d0fa7:45473 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 867b237d0fa7,45473,1731613901697 2024-11-14T19:51:45,149 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,45473,1731613901697' 2024-11-14T19:51:45,149 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T19:51:45,151 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T19:51:45,152 DEBUG [RS:0;867b237d0fa7:45473 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T19:51:45,152 INFO [RS:0;867b237d0fa7:45473 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T19:51:45,152 INFO [RS:0;867b237d0fa7:45473 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-14T19:51:45,152 WARN [867b237d0fa7:44661 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T19:51:45,269 INFO [RS:0;867b237d0fa7:45473 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C45473%2C1731613901697, suffix=, logDir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697, archiveDir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/oldWALs, maxLogs=32 2024-11-14T19:51:45,276 INFO [RS:0;867b237d0fa7:45473 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C45473%2C1731613901697.1731613905275 2024-11-14T19:51:45,318 INFO [RS:0;867b237d0fa7:45473 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613905275 2024-11-14T19:51:45,331 DEBUG [RS:0;867b237d0fa7:45473 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41791:41791),(127.0.0.1/127.0.0.1:45091:45091)] 2024-11-14T19:51:45,414 DEBUG [867b237d0fa7:44661 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T19:51:45,437 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=867b237d0fa7,45473,1731613901697 2024-11-14T19:51:45,460 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,45473,1731613901697, state=OPENING 2024-11-14T19:51:45,502 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T19:51:45,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:51:45,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:51:45,523 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:51:45,523 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:51:45,525 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:51:45,528 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=867b237d0fa7,45473,1731613901697}] 2024-11-14T19:51:45,738 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T19:51:45,745 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48029, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T19:51:45,769 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T19:51:45,770 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:51:45,789 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C45473%2C1731613901697.meta, suffix=.meta, logDir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697, archiveDir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/oldWALs, maxLogs=32 2024-11-14T19:51:45,793 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C45473%2C1731613901697.meta.1731613905792.meta 2024-11-14T19:51:45,836 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.meta.1731613905792.meta 2024-11-14T19:51:45,848 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45091:45091),(127.0.0.1/127.0.0.1:41791:41791)] 2024-11-14T19:51:45,865 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:51:45,867 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T19:51:45,873 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T19:51:45,879 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
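Note on the coprocessor lines just above: MultiRowMutationEndpoint is loaded from the table descriptor (HTD) of hbase:meta rather than from a jar on disk, which is why the load is logged with "path null". For a user table the same attachment is made through the descriptor builder; a minimal sketch, where the table name and column family are made up and the builder calls should be checked against the HBase client version actually in use:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorOnDescriptorSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Hypothetical table name; hbase:meta gets MultiRowMutationEndpoint implicitly.
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("example"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              // Registering the endpoint by class name stores it in the table descriptor,
              // so the region server loads it with "path null", as in the log above.
              .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
              .build();
          admin.createTable(td);
        }
      }
    }

Registering by class name is sufficient when the class is already on the region server classpath, which is the case for the bundled MultiRowMutationEndpoint.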
2024-11-14T19:51:45,884 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T19:51:45,885 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:51:45,885 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T19:51:45,885 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T19:51:45,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:51:45,913 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:51:45,913 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:45,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:51:45,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:51:45,918 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:51:45,918 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:45,919 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:51:45,923 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:51:45,928 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:51:45,928 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:45,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:51:45,931 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:51:45,934 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:51:45,934 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:45,935 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
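The four near-identical CompactionConfiguration lines above dump the same per-store compaction tuning for each column family of hbase:meta (info, ns, rep_barrier, table): ratio 1.2, minFilesToCompact 3, maxFilesToCompact 10, and a 128 MB minCompactSize under which files are selected without the ratio check. The general idea of the ratio test is to avoid re-compacting a file that is much larger than the rest of the selection. The stand-alone sketch below illustrates one way such a ratio test can be applied to a list of store-file sizes; it is a deliberate simplification, not the ExploringCompactionPolicy code itself.

    import java.util.ArrayList;
    import java.util.List;

    /** Simplified illustration of the size-ratio rule logged by CompactionConfiguration. */
    public class RatioSelectionSketch {

      /**
       * Walks store files ordered oldest-to-newest and keeps a file in the candidate
       * set only while fileSize <= ratio * (sum of the candidates already held).
       * Returns the chosen sizes, or an empty list if fewer than minFiles qualify.
       */
      static List<Long> select(long[] sizesOldestFirst, double ratio, int minFiles, int maxFiles) {
        List<Long> picked = new ArrayList<>();
        long sum = 0;
        for (long size : sizesOldestFirst) {
          if (picked.size() == maxFiles) {
            break;                  // never compact more than maxFilesToCompact at once
          }
          if (!picked.isEmpty() && size > ratio * sum) {
            break;                  // this file is too large relative to what we already hold
          }
          picked.add(size);
          sum += size;
        }
        return picked.size() >= minFiles ? picked : new ArrayList<>();
      }

      public static void main(String[] args) {
        // Three ~12 KB flush files, as this test will produce, easily pass the ratio test.
        long[] flushes = { 12509, 12509, 12509 };
        System.out.println(select(flushes, 1.2, 3, 10));   // -> [12509, 12509, 12509]
        // A much larger third file fails the ratio test, leaving only two candidates,
        // which is below minFilesToCompact=3, so nothing is selected.
        long[] skewed = { 12509, 12509, 500_000 };
        System.out.println(select(skewed, 1.2, 3, 10));    // -> []
      }
    }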
2024-11-14T19:51:45,936 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:51:45,941 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740 2024-11-14T19:51:45,947 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740 2024-11-14T19:51:45,956 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:51:45,956 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:51:45,957 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T19:51:45,962 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:51:45,969 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782653, jitterRate=-0.004805430769920349}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:51:45,969 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T19:51:45,972 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731613905886Writing region info on filesystem at 1731613905886Initializing all the Stores at 1731613905888 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731613905889 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731613905890 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731613905891 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731613905891Cleaning up temporary data from old regions at 1731613905956 (+65 ms)Running coprocessor post-open hooks at 1731613905969 (+13 ms)Region opened successfully at 1731613905972 (+3 ms) 2024-11-14T19:51:45,988 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731613905713 2024-11-14T19:51:46,015 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T19:51:46,016 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T19:51:46,049 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=867b237d0fa7,45473,1731613901697 2024-11-14T19:51:46,052 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,45473,1731613901697, state=OPEN 2024-11-14T19:51:46,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:51:46,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:51:46,095 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:51:46,095 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=867b237d0fa7,45473,1731613901697 2024-11-14T19:51:46,095 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:51:46,127 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T19:51:46,127 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=867b237d0fa7,45473,1731613901697 in 568 msec 2024-11-14T19:51:46,164 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T19:51:46,164 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.1540 sec 2024-11-14T19:51:46,167 DEBUG [PEWorker-5 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:51:46,167 INFO [PEWorker-5 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T19:51:46,202 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:51:46,203 DEBUG [PEWorker-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,45473,1731613901697, seqNum=-1] 2024-11-14T19:51:46,252 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:51:46,255 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33151, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:51:46,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.8730 sec 2024-11-14T19:51:46,318 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731613906318, completionTime=-1 2024-11-14T19:51:46,322 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T19:51:46,322 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T19:51:46,359 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T19:51:46,359 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731613966359 2024-11-14T19:51:46,360 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731614026359 2024-11-14T19:51:46,360 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 37 msec 2024-11-14T19:51:46,365 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,44661,1731613899985-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:46,366 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,44661,1731613899985-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:46,366 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,44661,1731613899985-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:46,373 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-867b237d0fa7:44661, period=300000, unit=MILLISECONDS is enabled. 
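InitMetaProcedure wraps up here by creating the built-in 'default' and 'hbase' namespaces. User namespaces are created the same way through the client Admin API; a minimal sketch with a made-up namespace name, relying on whatever connection defaults are on the classpath:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Hypothetical namespace; 'default' and 'hbase' already exist after cluster init.
          admin.createNamespace(NamespaceDescriptor.create("testlogs").build());
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());   // default, hbase, testlogs
          }
        }
      }
    }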
2024-11-14T19:51:46,373 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:46,381 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T19:51:46,385 DEBUG [master/867b237d0fa7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T19:51:46,431 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 4.299sec 2024-11-14T19:51:46,432 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T19:51:46,434 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T19:51:46,435 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T19:51:46,436 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T19:51:46,436 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T19:51:46,437 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,44661,1731613899985-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:51:46,451 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,44661,1731613899985-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T19:51:46,489 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T19:51:46,490 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T19:51:46,490 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,44661,1731613899985-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
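Every "Chore ScheduledChore name=..., period=..., unit=... is enabled" line in this startup sequence comes from ChoreService.scheduleChore as the master and region server register periodic background tasks (balancer, catalog janitor, heap tuner, MOB cleaners, and so on). A minimal sketch of defining and scheduling a custom chore; the chore name and period are invented, and the ScheduledChore/ChoreService signatures are written from memory of the 2.x/3.x code, so verify them against the version in use:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {

      /** Trivial Stoppable so the chore has something to check for shutdown. */
      static final class SimpleStopper implements Stoppable {
        private volatile boolean stopped;
        @Override public void stop(String why) { stopped = true; }
        @Override public boolean isStopped() { return stopped; }
      }

      /** Hypothetical chore; scheduling it logs "Chore ScheduledChore name=ExampleChore, period=60000, ...". */
      static final class ExampleChore extends ScheduledChore {
        ExampleChore(Stoppable stopper) {
          super("ExampleChore", stopper, 60000);   // name, stopper, period in ms
        }
        @Override protected void chore() {
          System.out.println("ExampleChore fired");   // periodic work goes here
        }
      }

      public static void main(String[] args) throws InterruptedException {
        SimpleStopper stopper = new SimpleStopper();
        ChoreService service = new ChoreService("example");   // thread-name prefix
        service.scheduleChore(new ExampleChore(stopper));     // emits the "is enabled" line
        Thread.sleep(1000);
        stopper.stop("done");
        service.shutdown();
      }
    }

Chores run on the ChoreService's shared scheduling pool, so only a period and a Stoppable for shutdown are needed; there is no per-chore thread to manage.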
2024-11-14T19:51:46,535 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a2ca5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:51:46,538 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-14T19:51:46,538 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-14T19:51:46,542 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 867b237d0fa7,44661,-1 for getting cluster id 2024-11-14T19:51:46,559 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T19:51:46,608 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6dd537b9-cdd2-4d00-8e06-7fd736fc6751' 2024-11-14T19:51:46,662 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T19:51:46,663 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6dd537b9-cdd2-4d00-8e06-7fd736fc6751" 2024-11-14T19:51:46,666 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@296e8ba8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:51:46,666 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [867b237d0fa7,44661,-1] 2024-11-14T19:51:46,672 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T19:51:46,688 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:51:46,698 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47982, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T19:51:46,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e495ca4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:51:46,704 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:51:46,719 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,45473,1731613901697, seqNum=-1] 2024-11-14T19:51:46,719 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:51:46,734 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49520, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:51:46,767 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=867b237d0fa7,44661,1731613899985 2024-11-14T19:51:46,770 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:51:46,790 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T19:51:46,796 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T19:51:46,803 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(321): The fetched master address is 867b237d0fa7,44661,1731613899985 2024-11-14T19:51:46,806 DEBUG [Time-limited test {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@77479c95 2024-11-14T19:51:46,825 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T19:51:46,833 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47990, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T19:51:46,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44661 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T19:51:46,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44661 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
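The two TableDescriptorChecker warnings above are expected here: this test deliberately shrinks the region max file size to 786,432 bytes and the memstore flush size to 8,192 bytes so that flushes and split checks fire after only a few kilobytes of writes. A minimal sketch of setting the same two properties on a Configuration (values copied from the warnings; whether the test sets them on the cluster configuration or on the table descriptor is not visible in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyRegionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 768 KB regions: far below the multi-GB default, so split checks trigger quickly.
        conf.setLong("hbase.hregion.max.filesize", 786432L);
        // 8 KB memstore flush threshold: a handful of ~1 KB rows forces a flush.
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
        System.out.println("max.filesize=" + conf.getLong("hbase.hregion.max.filesize", -1)
            + ", flush.size=" + conf.getLong("hbase.hregion.memstore.flush.size", -1));
      }
    }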
2024-11-14T19:51:46,842 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44661 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T19:51:46,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44661 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-14T19:51:46,859 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T19:51:46,861 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44661 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-14T19:51:46,862 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:46,865 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T19:51:46,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T19:51:46,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741835_1011 (size=389) 2024-11-14T19:51:46,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741835_1011 (size=389) 2024-11-14T19:51:46,990 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 34e20e6ff809b16ac933253ba034048e, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f 2024-11-14T19:51:47,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741836_1012 (size=72) 2024-11-14T19:51:47,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741836_1012 (size=72) 2024-11-14T19:51:47,066 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:51:47,067 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 34e20e6ff809b16ac933253ba034048e, disabling compactions & flushes 2024-11-14T19:51:47,067 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 2024-11-14T19:51:47,067 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 2024-11-14T19:51:47,067 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. after waiting 0 ms 2024-11-14T19:51:47,067 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 2024-11-14T19:51:47,067 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 2024-11-14T19:51:47,067 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 34e20e6ff809b16ac933253ba034048e: Waiting for close lock at 1731613907067Disabling compacts and flushes for region at 1731613907067Disabling writes for close at 1731613907067Writing region close event to WAL at 1731613907067Closed at 1731613907067 2024-11-14T19:51:47,075 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T19:51:47,088 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731613907075"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731613907075"}]},"ts":"1731613907075"} 2024-11-14T19:51:47,105 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
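The create request at 19:51:46,842 and the CreateTableProcedure steps around it (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META above, ASSIGN_REGIONS below) are what an ordinary Admin.createTable call produces. A minimal client-side sketch that builds an equivalent descriptor for the single 'info' family shown in the log (one version, ROW bloom filter, 64 KB blocks); attributes not mentioned are left at their defaults, and connection settings are assumed to come from the classpath:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
              .build();
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
              .setColumnFamily(info)
              .build();
          // Kicks off a CreateTableProcedure on the master (pid=4 in this run).
          admin.createTable(td);
        }
      }
    }

The procedure itself completes in 767 msec further down; the admin call polls for completion on the client side, which is what the repeated "Checking to see if procedure is done pid=4" lines show.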
2024-11-14T19:51:47,108 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T19:51:47,113 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731613907108"}]},"ts":"1731613907108"} 2024-11-14T19:51:47,119 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-14T19:51:47,121 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=34e20e6ff809b16ac933253ba034048e, ASSIGN}] 2024-11-14T19:51:47,125 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=34e20e6ff809b16ac933253ba034048e, ASSIGN 2024-11-14T19:51:47,137 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=34e20e6ff809b16ac933253ba034048e, ASSIGN; state=OFFLINE, location=867b237d0fa7,45473,1731613901697; forceNewPlan=false, retain=false 2024-11-14T19:51:47,289 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=34e20e6ff809b16ac933253ba034048e, regionState=OPENING, regionLocation=867b237d0fa7,45473,1731613901697 2024-11-14T19:51:47,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=34e20e6ff809b16ac933253ba034048e, ASSIGN because future has completed 2024-11-14T19:51:47,298 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 34e20e6ff809b16ac933253ba034048e, server=867b237d0fa7,45473,1731613901697}] 2024-11-14T19:51:47,465 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 
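When the region opens below, its split policy is logged as SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy with initialSize=16384 and a jittered desiredMaxFileSize of 710256, and near the end of this section the flusher decides "Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K". Those numbers follow from the tiny test settings: initialSize (16384) is twice the 8,192-byte flush size, desiredMaxFileSize is hbase.hregion.max.filesize (786432) scaled by the jitter rate, and the effective threshold is roughly the smaller of desiredMaxFileSize and initialSize times the cube of the table's region count, here with a single region. A stand-alone sketch of that arithmetic (illustration only, not the HBase policy classes):

    public class SplitThresholdSketch {

      static long desiredMaxFileSize(long configuredMaxFileSize, double jitterRate) {
        // The logged desiredMaxFileSize is the configured max file size scaled by (1 + jitter).
        return (long) (configuredMaxFileSize * (1.0 + jitterRate));
      }

      static long increasingToUpperBoundThreshold(long desiredMax, long initialSize, int regionsOnTable) {
        // Roughly min(desiredMaxFileSize, initialSize * regionCount^3); grows as the table splits.
        long stepped = initialSize * (long) regionsOnTable * regionsOnTable * regionsOnTable;
        return Math.min(desiredMax, stepped);
      }

      public static void main(String[] args) {
        long maxFileSize = 786_432L;   // hbase.hregion.max.filesize in this test
        long flushSize = 8_192L;       // hbase.hregion.memstore.flush.size in this test
        long initialSize = 2 * flushSize;                                   // 16384, as logged
        long desired = desiredMaxFileSize(maxFileSize, -0.09686298668384552); // ~710256 in the log
        long threshold = increasingToUpperBoundThreshold(desired, initialSize, 1);
        System.out.println("initialSize=" + initialSize
            + " desiredMaxFileSize=" + desired
            + " sizeToCheck=" + threshold);   // sizeToCheck=16384 -> "16.0 K" in the log
        // Two flushed store files of 12,509 bytes each sum to 25,018 bytes (~24.4 K) > 16,384,
        // hence "Should split because region size is big enough" at the end of this section.
      }
    }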
2024-11-14T19:51:47,466 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 34e20e6ff809b16ac933253ba034048e, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e.', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:51:47,466 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:51:47,466 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:51:47,467 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:51:47,467 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:51:47,490 INFO [StoreOpener-34e20e6ff809b16ac933253ba034048e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:51:47,504 INFO [StoreOpener-34e20e6ff809b16ac933253ba034048e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 34e20e6ff809b16ac933253ba034048e columnFamilyName info 2024-11-14T19:51:47,505 DEBUG [StoreOpener-34e20e6ff809b16ac933253ba034048e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:51:47,506 INFO [StoreOpener-34e20e6ff809b16ac933253ba034048e-1 {}] regionserver.HStore(327): Store=34e20e6ff809b16ac933253ba034048e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:51:47,507 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:51:47,509 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e 2024-11-14T19:51:47,510 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e 2024-11-14T19:51:47,511 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:51:47,511 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:51:47,522 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:51:47,556 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:51:47,558 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 34e20e6ff809b16ac933253ba034048e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710256, jitterRate=-0.09686298668384552}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T19:51:47,558 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:51:47,560 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 34e20e6ff809b16ac933253ba034048e: Running coprocessor pre-open hook at 1731613907469Writing region info on filesystem at 1731613907469Initializing all the Stores at 1731613907473 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731613907473Cleaning up temporary data from old regions at 1731613907511 (+38 ms)Running coprocessor post-open hooks at 1731613907558 (+47 ms)Region opened successfully at 1731613907560 (+2 ms) 2024-11-14T19:51:47,563 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e., pid=6, masterSystemTime=1731613907454 2024-11-14T19:51:47,570 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 2024-11-14T19:51:47,570 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 2024-11-14T19:51:47,571 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=34e20e6ff809b16ac933253ba034048e, regionState=OPEN, openSeqNum=2, regionLocation=867b237d0fa7,45473,1731613901697 2024-11-14T19:51:47,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 34e20e6ff809b16ac933253ba034048e, server=867b237d0fa7,45473,1731613901697 because future has completed 2024-11-14T19:51:47,592 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T19:51:47,594 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 34e20e6ff809b16ac933253ba034048e, server=867b237d0fa7,45473,1731613901697 in 287 msec 2024-11-14T19:51:47,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T19:51:47,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=34e20e6ff809b16ac933253ba034048e, ASSIGN in 471 msec 2024-11-14T19:51:47,600 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T19:51:47,601 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731613907600"}]},"ts":"1731613907600"} 2024-11-14T19:51:47,609 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-14T19:51:47,612 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T19:51:47,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 767 msec 2024-11-14T19:51:50,670 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T19:51:50,671 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T19:51:50,686 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-14T19:51:50,686 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T19:51:50,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:51:50,689 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T19:51:50,689 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T19:51:50,689 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T19:51:51,411 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-14T19:51:51,452 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T19:51:51,453 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-14T19:51:57,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T19:51:57,009 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-14T19:51:57,013 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-14T19:51:57,021 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-14T19:51:57,021 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 
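The next block shows the test forcing its first log roll: the old WAL (.1731613905275) is closed, a new one (.1731613917022) is opened on the same datanode pipeline, and the old file is archived to oldWALs. Rolling is normally automatic (size- or time-based), but a client can also request it per region server through Admin; a minimal sketch, using the server name from this log and assuming the rollWALWriter(ServerName) method available on recent Admin versions:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // host,port,startcode exactly as the region server identifies itself in this log.
          ServerName rs = ServerName.valueOf("867b237d0fa7,45473,1731613901697");
          // Asks that region server to close its current WAL and start a new one,
          // producing "Rolled WAL ... ; new WAL ..." lines like the ones below.
          admin.rollWALWriter(rs);
        }
      }
    }

The old WAL here holds a single 443-byte entry, far below the 128 MB roll size, so this roll was requested explicitly by the test rather than triggered by size.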
2024-11-14T19:51:57,022 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C45473%2C1731613901697.1731613917022 2024-11-14T19:51:57,032 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:51:57,032 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:51:57,032 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:51:57,032 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:51:57,032 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:51:57,033 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613905275 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613917022 2024-11-14T19:51:57,034 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41791:41791),(127.0.0.1/127.0.0.1:45091:45091)] 2024-11-14T19:51:57,034 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613905275 is not closed yet, will try archiving it next time 2024-11-14T19:51:57,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741833_1009 (size=451) 2024-11-14T19:51:57,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741833_1009 (size=451) 2024-11-14T19:51:57,038 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613905275 to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/oldWALs/867b237d0fa7%2C45473%2C1731613901697.1731613905275 2024-11-14T19:51:57,045 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e., hostname=867b237d0fa7,45473,1731613901697, seqNum=2] 2024-11-14T19:52:08,624 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
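The flush that follows is driven purely by data volume: with the flush threshold at 8,192 bytes, seven rows carrying roughly 1 KB values (row0001 onward, biggest cell length 1080 in the log) push the memstore to 7.36 KB and produce a ~12.2 KB HFile. A minimal sketch of writes that would exercise the same path; the row and value sizes mirror the log, but the exact keys and payloads the test uses are not shown here, and the explicit Admin.flush is included only as the manual alternative to the size-triggered flush:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndFlushSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        byte[] family = Bytes.toBytes("info");
        byte[] value = new byte[1024];          // ~1 KB payload per cell, as in the log

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          for (int i = 1; i <= 7; i++) {
            // Keys shaped like the logged ones (row0001, row0002, ...); empty qualifier
            // matches the "row0001/info:/..." key format in the flush output.
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(family, Bytes.toBytes(""), value);
            table.put(put);
          }
          // With hbase.hregion.memstore.flush.size=8192 the region server flushes on its own;
          // an explicit flush forces the same memstore-to-HFile write immediately.
          admin.flush(tn);
        }
      }
    }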
2024-11-14T19:52:09,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45473 {}] regionserver.HRegion(8855): Flush requested on 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:52:09,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 34e20e6ff809b16ac933253ba034048e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T19:52:09,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/96705689fa954b5fae866c212e468e56 is 1080, key is row0001/info:/1731613917048/Put/seqid=0 2024-11-14T19:52:09,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741838_1014 (size=12509) 2024-11-14T19:52:09,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741838_1014 (size=12509) 2024-11-14T19:52:09,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/96705689fa954b5fae866c212e468e56 2024-11-14T19:52:09,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/96705689fa954b5fae866c212e468e56 as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/96705689fa954b5fae866c212e468e56 2024-11-14T19:52:09,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/96705689fa954b5fae866c212e468e56, entries=7, sequenceid=11, filesize=12.2 K 2024-11-14T19:52:09,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 34e20e6ff809b16ac933253ba034048e in 181ms, sequenceid=11, compaction requested=false 2024-11-14T19:52:09,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 34e20e6ff809b16ac933253ba034048e: 2024-11-14T19:52:17,100 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C45473%2C1731613901697.1731613937100 2024-11-14T19:52:17,314 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK], DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK]] 2024-11-14T19:52:17,314 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:17,315 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:17,315 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:17,315 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:17,316 INFO [sync.4 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:17,316 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613917022 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613937100 2024-11-14T19:52:17,318 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41791:41791),(127.0.0.1/127.0.0.1:45091:45091)] 2024-11-14T19:52:17,318 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613917022 is not closed yet, will try archiving it next time 2024-11-14T19:52:17,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741837_1013 (size=12399) 2024-11-14T19:52:17,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741837_1013 (size=12399) 2024-11-14T19:52:17,523 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK], DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK]] 2024-11-14T19:52:19,730 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK], DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK]] 2024-11-14T19:52:21,937 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK], DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK]] 2024-11-14T19:52:24,143 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK], DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK]] 2024-11-14T19:52:24,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45473 {}] regionserver.HRegion(8855): Flush requested on 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:52:24,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 34e20e6ff809b16ac933253ba034048e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T19:52:24,345 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK], DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK]] 2024-11-14T19:52:24,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/2f730dfd779644d38244b0251cd85ed2 is 1080, key is row0008/info:/1731613931084/Put/seqid=0 2024-11-14T19:52:24,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741840_1016 (size=12509) 2024-11-14T19:52:24,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741840_1016 (size=12509) 2024-11-14T19:52:24,361 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/2f730dfd779644d38244b0251cd85ed2 2024-11-14T19:52:24,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/2f730dfd779644d38244b0251cd85ed2 as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/2f730dfd779644d38244b0251cd85ed2 2024-11-14T19:52:24,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/2f730dfd779644d38244b0251cd85ed2, entries=7, sequenceid=21, filesize=12.2 K 2024-11-14T19:52:24,591 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK], DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK]] 2024-11-14T19:52:24,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 34e20e6ff809b16ac933253ba034048e in 448ms, sequenceid=21, compaction requested=false 2024-11-14T19:52:24,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 34e20e6ff809b16ac933253ba034048e: 2024-11-14T19:52:24,591 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-14T19:52:24,591 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:52:24,592 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/96705689fa954b5fae866c212e468e56 because midkey is the same as first or last row 2024-11-14T19:52:26,348 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK], DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK]] 2024-11-14T19:52:26,860 INFO [master/867b237d0fa7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T19:52:26,860 INFO [master/867b237d0fa7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-14T19:52:28,555 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK], DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK]] 2024-11-14T19:52:28,559 WARN [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK], DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK]] 2024-11-14T19:52:28,561 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C45473%2C1731613901697:(num 1731613937100) roll requested 2024-11-14T19:52:28,562 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C45473%2C1731613901697.1731613948562 2024-11-14T19:52:28,776 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK], DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK]] 2024-11-14T19:52:28,776 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:28,777 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:28,777 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:28,777 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:28,777 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:28,777 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613937100 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613948562 2024-11-14T19:52:28,778 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:45091:45091),(127.0.0.1/127.0.0.1:41791:41791)] 2024-11-14T19:52:28,778 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613937100 is not closed yet, will try archiving it next time 2024-11-14T19:52:28,779 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613917022 to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/oldWALs/867b237d0fa7%2C45473%2C1731613901697.1731613917022 2024-11-14T19:52:28,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741839_1015 (size=7739) 2024-11-14T19:52:28,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741839_1015 (size=7739) 2024-11-14T19:52:30,761 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:32,467 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 34e20e6ff809b16ac933253ba034048e, had cached 0 bytes from a total of 25018 2024-11-14T19:52:32,968 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:35,172 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:37,378 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:38,625 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-14T19:52:39,382 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T19:52:39,383 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C45473%2C1731613901697.1731613959382 2024-11-14T19:52:44,398 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:44,403 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:44,404 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C45473%2C1731613901697:(num 1731613959382) roll requested 2024-11-14T19:52:44,404 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:44,404 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:44,405 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:44,405 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:44,405 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:44,406 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613948562 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613959382 2024-11-14T19:52:44,407 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45091:45091),(127.0.0.1/127.0.0.1:41791:41791)] 2024-11-14T19:52:44,407 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613948562 is not closed yet, will try archiving it next time 2024-11-14T19:52:44,407 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C45473%2C1731613901697.1731613964407 2024-11-14T19:52:44,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741841_1017 (size=4753) 2024-11-14T19:52:44,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741841_1017 (size=4753) 2024-11-14T19:52:49,411 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:49,411 WARN [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] 
wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:49,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45473 {}] regionserver.HRegion(8855): Flush requested on 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:52:49,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 34e20e6ff809b16ac933253ba034048e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T19:52:49,416 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:49,416 WARN [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:51,412 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T19:52:54,414 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:54,414 WARN [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:54,414 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:54,414 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:54,414 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:54,414 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:54,414 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:54,415 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613959382 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613964407 2024-11-14T19:52:54,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741842_1018 (size=1569) 2024-11-14T19:52:54,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to 
blk_1073741842_1018 (size=1569) 2024-11-14T19:52:54,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/3dcf598a1ee84c6a8b561b4c92be463f is 1080, key is row0015/info:/1731613946146/Put/seqid=0 2024-11-14T19:52:54,435 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45091:45091),(127.0.0.1/127.0.0.1:41791:41791)] 2024-11-14T19:52:54,435 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C45473%2C1731613901697:(num 1731613964407) roll requested 2024-11-14T19:52:54,436 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C45473%2C1731613901697.1731613974435 2024-11-14T19:52:54,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741844_1020 (size=12509) 2024-11-14T19:52:54,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741844_1020 (size=12509) 2024-11-14T19:52:54,449 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/3dcf598a1ee84c6a8b561b4c92be463f 2024-11-14T19:52:54,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/3dcf598a1ee84c6a8b561b4c92be463f as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/3dcf598a1ee84c6a8b561b4c92be463f 2024-11-14T19:52:54,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/3dcf598a1ee84c6a8b561b4c92be463f, entries=7, sequenceid=31, filesize=12.2 K 2024-11-14T19:52:59,449 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:59,449 WARN [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:59,483 INFO [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:59,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 34e20e6ff809b16ac933253ba034048e in 10072ms, sequenceid=31, compaction requested=true 2024-11-14T19:52:59,483 WARN [FSHLog-0-hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f-prefix:867b237d0fa7,45473,1731613901697 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44629,DS-23d19aca-8593-4d1a-a745-e2ef7a2b946b,DISK], DatanodeInfoWithStorage[127.0.0.1:35407,DS-6ee5d337-162a-4f47-b8fa-da2b6a06d67a,DISK]] 2024-11-14T19:52:59,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 34e20e6ff809b16ac933253ba034048e: 2024-11-14T19:52:59,483 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-14T19:52:59,483 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:52:59,483 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/96705689fa954b5fae866c212e468e56 because midkey is the same as first or last row 2024-11-14T19:52:59,483 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,483 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,484 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,484 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,484 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,484 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613964407 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613974435 2024-11-14T19:52:59,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 34e20e6ff809b16ac933253ba034048e:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T19:52:59,485 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41791:41791),(127.0.0.1/127.0.0.1:45091:45091)] 2024-11-14T19:52:59,485 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613964407 is not closed yet, will try archiving it next time 2024-11-14T19:52:59,486 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613937100 to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/oldWALs/867b237d0fa7%2C45473%2C1731613901697.1731613937100 2024-11-14T19:52:59,486 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C45473%2C1731613901697:(num 1731613974435) roll requested 2024-11-14T19:52:59,486 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C45473%2C1731613901697.1731613979486 2024-11-14T19:52:59,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741843_1019 (size=438) 2024-11-14T19:52:59,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:52:59,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741843_1019 (size=438) 2024-11-14T19:52:59,488 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T19:52:59,488 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613948562 to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/oldWALs/867b237d0fa7%2C45473%2C1731613901697.1731613948562 2024-11-14T19:52:59,490 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613959382 to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/oldWALs/867b237d0fa7%2C45473%2C1731613901697.1731613959382 2024-11-14T19:52:59,491 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T19:52:59,491 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613964407 to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/oldWALs/867b237d0fa7%2C45473%2C1731613901697.1731613964407 2024-11-14T19:52:59,493 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.HStore(1541): 34e20e6ff809b16ac933253ba034048e/info is initiating minor compaction (all files) 2024-11-14T19:52:59,494 INFO [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 34e20e6ff809b16ac933253ba034048e/info in TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 
2024-11-14T19:52:59,494 INFO [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/96705689fa954b5fae866c212e468e56, hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/2f730dfd779644d38244b0251cd85ed2, hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/3dcf598a1ee84c6a8b561b4c92be463f] into tmpdir=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp, totalSize=36.6 K 2024-11-14T19:52:59,496 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] compactions.Compactor(225): Compacting 96705689fa954b5fae866c212e468e56, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731613917048 2024-11-14T19:52:59,496 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,496 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,496 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,496 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,497 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,497 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613974435 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613979486 2024-11-14T19:52:59,497 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2f730dfd779644d38244b0251cd85ed2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731613931084 2024-11-14T19:52:59,498 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3dcf598a1ee84c6a8b561b4c92be463f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731613946146 2024-11-14T19:52:59,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741845_1021 (size=93) 2024-11-14T19:52:59,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741845_1021 (size=93) 2024-11-14T19:52:59,506 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41791:41791),(127.0.0.1/127.0.0.1:45091:45091)] 2024-11-14T19:52:59,506 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613974435 is not closed yet, will try archiving it next time 2024-11-14T19:52:59,506 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
867b237d0fa7%2C45473%2C1731613901697.1731613979506 2024-11-14T19:52:59,515 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,515 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,515 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,515 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,515 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:52:59,516 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613979486 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613979506 2024-11-14T19:52:59,520 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45091:45091),(127.0.0.1/127.0.0.1:41791:41791)] 2024-11-14T19:52:59,520 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613974435 is not closed yet, will try archiving it next time 2024-11-14T19:52:59,520 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613979486 is not closed yet, will try archiving it next time 2024-11-14T19:52:59,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741846_1022 (size=1258) 2024-11-14T19:52:59,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741846_1022 (size=1258) 2024-11-14T19:52:59,523 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613974435 is not closed yet, will try archiving it next time 2024-11-14T19:52:59,532 INFO [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 34e20e6ff809b16ac933253ba034048e#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:52:59,533 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/4a55244a303441dba6da1095026cddc2 is 1080, key is row0001/info:/1731613917048/Put/seqid=0 2024-11-14T19:52:59,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741848_1024 (size=27710) 2024-11-14T19:52:59,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741848_1024 (size=27710) 2024-11-14T19:52:59,553 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/4a55244a303441dba6da1095026cddc2 as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/4a55244a303441dba6da1095026cddc2 2024-11-14T19:52:59,571 INFO [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 34e20e6ff809b16ac933253ba034048e/info of 34e20e6ff809b16ac933253ba034048e into 4a55244a303441dba6da1095026cddc2(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T19:52:59,572 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 34e20e6ff809b16ac933253ba034048e: 2024-11-14T19:52:59,574 INFO [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e., storeName=34e20e6ff809b16ac933253ba034048e/info, priority=13, startTime=1731613979485; duration=0sec 2024-11-14T19:52:59,574 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T19:52:59,574 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:52:59,574 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/4a55244a303441dba6da1095026cddc2 because midkey is the same as first or last row 2024-11-14T19:52:59,575 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T19:52:59,575 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:52:59,575 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/4a55244a303441dba6da1095026cddc2 because midkey is the same as first or last row 2024-11-14T19:52:59,575 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T19:52:59,575 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:52:59,575 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/4a55244a303441dba6da1095026cddc2 because midkey is the same as first or last row 2024-11-14T19:52:59,575 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:52:59,575 DEBUG [RS:0;867b237d0fa7:45473-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 34e20e6ff809b16ac933253ba034048e:info 2024-11-14T19:52:59,902 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/WALs/867b237d0fa7,45473,1731613901697/867b237d0fa7%2C45473%2C1731613901697.1731613974435 to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/oldWALs/867b237d0fa7%2C45473%2C1731613901697.1731613974435 2024-11-14T19:53:08,625 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-14T19:53:11,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45473 {}] regionserver.HRegion(8855): Flush requested on 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:53:11,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 34e20e6ff809b16ac933253ba034048e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T19:53:11,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/e0938f2abdab4af4860ef0e49dbf169c is 1080, key is row0022/info:/1731613979508/Put/seqid=0 2024-11-14T19:53:11,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741849_1025 (size=12509) 2024-11-14T19:53:11,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741849_1025 (size=12509) 2024-11-14T19:53:11,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/e0938f2abdab4af4860ef0e49dbf169c 2024-11-14T19:53:11,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/e0938f2abdab4af4860ef0e49dbf169c as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/e0938f2abdab4af4860ef0e49dbf169c 2024-11-14T19:53:11,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/e0938f2abdab4af4860ef0e49dbf169c, entries=7, sequenceid=42, filesize=12.2 K 2024-11-14T19:53:11,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 34e20e6ff809b16ac933253ba034048e in 38ms, sequenceid=42, compaction requested=false 2024-11-14T19:53:11,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 34e20e6ff809b16ac933253ba034048e: 2024-11-14T19:53:11,573 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-14T19:53:11,573 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:53:11,573 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/4a55244a303441dba6da1095026cddc2 because midkey is the same as first or last row 2024-11-14T19:53:17,468 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 34e20e6ff809b16ac933253ba034048e, had 
cached 0 bytes from a total of 40219 2024-11-14T19:53:19,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T19:53:19,547 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T19:53:19,547 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:53:19,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:53:19,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-11-14T19:53:19,552 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T19:53:19,552 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T19:53:19,552 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2135136293, stopped=false 2024-11-14T19:53:19,553 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=867b237d0fa7,44661,1731613899985 2024-11-14T19:53:19,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T19:53:19,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T19:53:19,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:19,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:19,685 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T19:53:19,686 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T19:53:19,686 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:53:19,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:53:19,687 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:53:19,687 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:53:19,687 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '867b237d0fa7,45473,1731613901697' ***** 2024-11-14T19:53:19,688 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T19:53:19,688 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T19:53:19,689 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T19:53:19,689 INFO [RS:0;867b237d0fa7:45473 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T19:53:19,689 INFO [RS:0;867b237d0fa7:45473 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T19:53:19,689 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(3091): Received CLOSE for 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:53:19,690 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(959): stopping server 867b237d0fa7,45473,1731613901697 2024-11-14T19:53:19,690 INFO [RS:0;867b237d0fa7:45473 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T19:53:19,691 INFO [RS:0;867b237d0fa7:45473 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;867b237d0fa7:45473. 2024-11-14T19:53:19,691 DEBUG [RS:0;867b237d0fa7:45473 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:53:19,691 DEBUG [RS:0;867b237d0fa7:45473 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:53:19,691 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 34e20e6ff809b16ac933253ba034048e, disabling compactions & flushes 2024-11-14T19:53:19,691 INFO [RS:0;867b237d0fa7:45473 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T19:53:19,691 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 2024-11-14T19:53:19,691 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T19:53:19,691 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 2024-11-14T19:53:19,691 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T19:53:19,691 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. after waiting 0 ms 2024-11-14T19:53:19,692 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 2024-11-14T19:53:19,692 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T19:53:19,692 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 34e20e6ff809b16ac933253ba034048e 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-14T19:53:19,692 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T19:53:19,692 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:53:19,693 DEBUG [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(1325): Online Regions={34e20e6ff809b16ac933253ba034048e=TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e., 1588230740=hbase:meta,,1.1588230740} 2024-11-14T19:53:19,693 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:53:19,693 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:53:19,693 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T19:53:19,693 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T19:53:19,693 DEBUG [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 34e20e6ff809b16ac933253ba034048e 2024-11-14T19:53:19,693 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-14T19:53:19,699 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/b17939cbd9df4b3f8d78986b37548254 is 1080, key is row0029/info:/1731613993536/Put/seqid=0 2024-11-14T19:53:19,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741850_1026 (size=8193) 2024-11-14T19:53:19,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741850_1026 (size=8193) 2024-11-14T19:53:19,706 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/b17939cbd9df4b3f8d78986b37548254 2024-11-14T19:53:19,717 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/.tmp/info/280bad7fae6942a58c940dfab0d28052 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e./info:regioninfo/1731613907571/Put/seqid=0 2024-11-14T19:53:19,718 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/.tmp/info/b17939cbd9df4b3f8d78986b37548254 as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/b17939cbd9df4b3f8d78986b37548254 2024-11-14T19:53:19,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741851_1027 (size=7016) 2024-11-14T19:53:19,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741851_1027 (size=7016) 2024-11-14T19:53:19,725 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/.tmp/info/280bad7fae6942a58c940dfab0d28052 2024-11-14T19:53:19,730 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/b17939cbd9df4b3f8d78986b37548254, entries=3, sequenceid=48, filesize=8.0 K 2024-11-14T19:53:19,732 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 34e20e6ff809b16ac933253ba034048e in 40ms, sequenceid=48, compaction requested=true 2024-11-14T19:53:19,733 
DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/96705689fa954b5fae866c212e468e56, hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/2f730dfd779644d38244b0251cd85ed2, hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/3dcf598a1ee84c6a8b561b4c92be463f] to archive 2024-11-14T19:53:19,737 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T19:53:19,741 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/96705689fa954b5fae866c212e468e56 to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/96705689fa954b5fae866c212e468e56 2024-11-14T19:53:19,744 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/2f730dfd779644d38244b0251cd85ed2 to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/2f730dfd779644d38244b0251cd85ed2 2024-11-14T19:53:19,748 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/3dcf598a1ee84c6a8b561b4c92be463f to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/info/3dcf598a1ee84c6a8b561b4c92be463f 2024-11-14T19:53:19,755 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/.tmp/ns/48636385eb874cfeae581b3c4d21ae6f is 43, key is default/ns:d/1731613906262/Put/seqid=0 2024-11-14T19:53:19,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741852_1028 (size=5153) 2024-11-14T19:53:19,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741852_1028 (size=5153) 
2024-11-14T19:53:19,762 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/.tmp/ns/48636385eb874cfeae581b3c4d21ae6f 2024-11-14T19:53:19,763 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=867b237d0fa7:44661 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] 
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-14T19:53:19,764 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [96705689fa954b5fae866c212e468e56=12509, 2f730dfd779644d38244b0251cd85ed2=12509, 3dcf598a1ee84c6a8b561b4c92be463f=12509] 2024-11-14T19:53:19,770 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/default/TestLogRolling-testSlowSyncLogRolling/34e20e6ff809b16ac933253ba034048e/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-14T19:53:19,772 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 2024-11-14T19:53:19,773 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 34e20e6ff809b16ac933253ba034048e: Waiting for close lock at 1731613999690Running coprocessor pre-close hooks at 1731613999691 (+1 ms)Disabling compacts and flushes for region at 1731613999691Disabling writes for close at 1731613999692 (+1 ms)Obtaining lock to block concurrent updates at 1731613999692Preparing flush snapshotting stores in 34e20e6ff809b16ac933253ba034048e at 1731613999692Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731613999693 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 
at 1731613999694 (+1 ms)Flushing 34e20e6ff809b16ac933253ba034048e/info: creating writer at 1731613999694Flushing 34e20e6ff809b16ac933253ba034048e/info: appending metadata at 1731613999699 (+5 ms)Flushing 34e20e6ff809b16ac933253ba034048e/info: closing flushed file at 1731613999699Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b3737cd: reopening flushed file at 1731613999716 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 34e20e6ff809b16ac933253ba034048e in 40ms, sequenceid=48, compaction requested=true at 1731613999732 (+16 ms)Writing region close event to WAL at 1731613999765 (+33 ms)Running coprocessor post-close hooks at 1731613999771 (+6 ms)Closed at 1731613999772 (+1 ms) 2024-11-14T19:53:19,773 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731613906835.34e20e6ff809b16ac933253ba034048e. 2024-11-14T19:53:19,790 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/.tmp/table/f646b5dbd1484153b0a41b2fb71c345b is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731613907600/Put/seqid=0 2024-11-14T19:53:19,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741853_1029 (size=5396) 2024-11-14T19:53:19,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741853_1029 (size=5396) 2024-11-14T19:53:19,797 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/.tmp/table/f646b5dbd1484153b0a41b2fb71c345b 2024-11-14T19:53:19,805 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/.tmp/info/280bad7fae6942a58c940dfab0d28052 as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/info/280bad7fae6942a58c940dfab0d28052 2024-11-14T19:53:19,814 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/info/280bad7fae6942a58c940dfab0d28052, entries=10, sequenceid=11, filesize=6.9 K 2024-11-14T19:53:19,815 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/.tmp/ns/48636385eb874cfeae581b3c4d21ae6f as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/ns/48636385eb874cfeae581b3c4d21ae6f 2024-11-14T19:53:19,823 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/ns/48636385eb874cfeae581b3c4d21ae6f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T19:53:19,824 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/.tmp/table/f646b5dbd1484153b0a41b2fb71c345b as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/table/f646b5dbd1484153b0a41b2fb71c345b 2024-11-14T19:53:19,832 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/table/f646b5dbd1484153b0a41b2fb71c345b, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T19:53:19,834 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 140ms, sequenceid=11, compaction requested=false 2024-11-14T19:53:19,840 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T19:53:19,841 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:53:19,841 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:53:19,841 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731613999692Running coprocessor pre-close hooks at 1731613999692Disabling compacts and flushes for region at 1731613999692Disabling writes for close at 1731613999693 (+1 ms)Obtaining lock to block concurrent updates at 1731613999693Preparing flush snapshotting stores in 1588230740 at 1731613999693Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731613999694 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731613999695 (+1 ms)Flushing 1588230740/info: creating writer at 1731613999695Flushing 1588230740/info: appending metadata at 1731613999716 (+21 ms)Flushing 1588230740/info: closing flushed file at 1731613999716Flushing 1588230740/ns: creating writer at 1731613999735 (+19 ms)Flushing 1588230740/ns: appending metadata at 1731613999754 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731613999754Flushing 1588230740/table: creating writer at 1731613999772 (+18 ms)Flushing 1588230740/table: appending metadata at 1731613999790 (+18 ms)Flushing 1588230740/table: closing flushed file at 1731613999790Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fce7c7d: reopening flushed file at 1731613999804 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2db7375f: reopening flushed file at 1731613999814 (+10 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f104da4: reopening flushed file at 1731613999823 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 140ms, sequenceid=11, compaction requested=false at 1731613999834 (+11 ms)Writing region close event to WAL at 1731613999835 (+1 ms)Running coprocessor post-close hooks at 1731613999841 (+6 ms)Closed at 1731613999841 2024-11-14T19:53:19,841 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T19:53:19,894 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(976): stopping server 867b237d0fa7,45473,1731613901697; all regions closed. 2024-11-14T19:53:19,895 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:19,896 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:19,896 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:19,896 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:19,896 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:19,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741834_1010 (size=3066) 2024-11-14T19:53:19,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741834_1010 (size=3066) 2024-11-14T19:53:19,902 DEBUG [RS:0;867b237d0fa7:45473 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/oldWALs 2024-11-14T19:53:19,902 INFO [RS:0;867b237d0fa7:45473 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C45473%2C1731613901697.meta:.meta(num 1731613905792) 2024-11-14T19:53:19,903 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:19,903 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:19,903 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:19,903 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:19,904 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:19,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741847_1023 (size=12695) 2024-11-14T19:53:19,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741847_1023 (size=12695) 2024-11-14T19:53:19,910 DEBUG [RS:0;867b237d0fa7:45473 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/oldWALs 2024-11-14T19:53:19,910 INFO [RS:0;867b237d0fa7:45473 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C45473%2C1731613901697:(num 1731613979506) 2024-11-14T19:53:19,910 DEBUG [RS:0;867b237d0fa7:45473 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:53:19,910 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:53:19,910 INFO [RS:0;867b237d0fa7:45473 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:53:19,910 INFO [RS:0;867b237d0fa7:45473 {}] hbase.ChoreService(370): Chore service for: regionserver/867b237d0fa7:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T19:53:19,910 INFO [RS:0;867b237d0fa7:45473 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:53:19,910 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T19:53:19,911 INFO [RS:0;867b237d0fa7:45473 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45473 2024-11-14T19:53:19,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/867b237d0fa7,45473,1731613901697 2024-11-14T19:53:19,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:53:19,922 INFO [RS:0;867b237d0fa7:45473 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:53:19,923 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [867b237d0fa7,45473,1731613901697] 2024-11-14T19:53:19,939 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/867b237d0fa7,45473,1731613901697 already deleted, retry=false 2024-11-14T19:53:19,939 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 867b237d0fa7,45473,1731613901697 expired; onlineServers=0 2024-11-14T19:53:19,939 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '867b237d0fa7,44661,1731613899985' ***** 2024-11-14T19:53:19,939 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T19:53:19,939 INFO [M:0;867b237d0fa7:44661 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T19:53:19,940 INFO [M:0;867b237d0fa7:44661 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:53:19,940 DEBUG [M:0;867b237d0fa7:44661 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T19:53:19,940 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T19:53:19,940 DEBUG [M:0;867b237d0fa7:44661 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T19:53:19,940 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731613904734 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731613904734,5,FailOnTimeoutGroup] 2024-11-14T19:53:19,940 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731613904737 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731613904737,5,FailOnTimeoutGroup] 2024-11-14T19:53:19,940 INFO [M:0;867b237d0fa7:44661 {}] hbase.ChoreService(370): Chore service for: master/867b237d0fa7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T19:53:19,941 INFO [M:0;867b237d0fa7:44661 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:53:19,941 DEBUG [M:0;867b237d0fa7:44661 {}] master.HMaster(1795): Stopping service threads 2024-11-14T19:53:19,941 INFO [M:0;867b237d0fa7:44661 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T19:53:19,941 INFO [M:0;867b237d0fa7:44661 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T19:53:19,942 INFO [M:0;867b237d0fa7:44661 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T19:53:19,942 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T19:53:19,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T19:53:19,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:19,947 DEBUG [M:0;867b237d0fa7:44661 {}] zookeeper.ZKUtil(347): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T19:53:19,948 WARN [M:0;867b237d0fa7:44661 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T19:53:19,950 INFO [M:0;867b237d0fa7:44661 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/.lastflushedseqids 2024-11-14T19:53:19,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741854_1030 (size=130) 2024-11-14T19:53:19,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741854_1030 (size=130) 2024-11-14T19:53:19,966 INFO [M:0;867b237d0fa7:44661 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T19:53:19,967 INFO [M:0;867b237d0fa7:44661 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T19:53:19,967 DEBUG [M:0;867b237d0fa7:44661 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:53:19,967 INFO [M:0;867b237d0fa7:44661 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:19,967 DEBUG [M:0;867b237d0fa7:44661 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:19,967 DEBUG [M:0;867b237d0fa7:44661 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:53:19,967 DEBUG [M:0;867b237d0fa7:44661 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:19,967 INFO [M:0;867b237d0fa7:44661 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-14T19:53:19,990 DEBUG [M:0;867b237d0fa7:44661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0ab3e9f421b04b94bb92c25a8b6c7141 is 82, key is hbase:meta,,1/info:regioninfo/1731613906048/Put/seqid=0 2024-11-14T19:53:19,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741855_1031 (size=5672) 2024-11-14T19:53:19,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741855_1031 (size=5672) 2024-11-14T19:53:19,998 INFO [M:0;867b237d0fa7:44661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0ab3e9f421b04b94bb92c25a8b6c7141 2024-11-14T19:53:20,022 DEBUG [M:0;867b237d0fa7:44661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/70b93f62dea647059d685e7e8cb8ce61 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731613907616/Put/seqid=0 2024-11-14T19:53:20,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741856_1032 (size=6247) 2024-11-14T19:53:20,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741856_1032 (size=6247) 2024-11-14T19:53:20,029 INFO [M:0;867b237d0fa7:44661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/70b93f62dea647059d685e7e8cb8ce61 2024-11-14T19:53:20,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:53:20,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45473-0x1013c14fd950001, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-14T19:53:20,031 INFO [RS:0;867b237d0fa7:45473 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:53:20,031 INFO [RS:0;867b237d0fa7:45473 {}] regionserver.HRegionServer(1031): Exiting; stopping=867b237d0fa7,45473,1731613901697; zookeeper connection closed. 2024-11-14T19:53:20,032 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5f2c8f85 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5f2c8f85 2024-11-14T19:53:20,032 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T19:53:20,036 INFO [M:0;867b237d0fa7:44661 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 70b93f62dea647059d685e7e8cb8ce61 2024-11-14T19:53:20,052 DEBUG [M:0;867b237d0fa7:44661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d6a3c3f597249f29aaa33bf810f158c is 69, key is 867b237d0fa7,45473,1731613901697/rs:state/1731613904839/Put/seqid=0 2024-11-14T19:53:20,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741857_1033 (size=5156) 2024-11-14T19:53:20,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741857_1033 (size=5156) 2024-11-14T19:53:20,059 INFO [M:0;867b237d0fa7:44661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d6a3c3f597249f29aaa33bf810f158c 2024-11-14T19:53:20,089 DEBUG [M:0;867b237d0fa7:44661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b2f784e7f83543cab38b246740fafcd8 is 52, key is load_balancer_on/state:d/1731613906784/Put/seqid=0 2024-11-14T19:53:20,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741858_1034 (size=5056) 2024-11-14T19:53:20,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741858_1034 (size=5056) 2024-11-14T19:53:20,097 INFO [M:0;867b237d0fa7:44661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b2f784e7f83543cab38b246740fafcd8 2024-11-14T19:53:20,105 DEBUG [M:0;867b237d0fa7:44661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0ab3e9f421b04b94bb92c25a8b6c7141 as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0ab3e9f421b04b94bb92c25a8b6c7141 2024-11-14T19:53:20,113 INFO [M:0;867b237d0fa7:44661 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0ab3e9f421b04b94bb92c25a8b6c7141, entries=8, sequenceid=59, filesize=5.5 K 2024-11-14T19:53:20,115 DEBUG [M:0;867b237d0fa7:44661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/70b93f62dea647059d685e7e8cb8ce61 as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/70b93f62dea647059d685e7e8cb8ce61 2024-11-14T19:53:20,123 INFO [M:0;867b237d0fa7:44661 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 70b93f62dea647059d685e7e8cb8ce61 2024-11-14T19:53:20,123 INFO [M:0;867b237d0fa7:44661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/70b93f62dea647059d685e7e8cb8ce61, entries=6, sequenceid=59, filesize=6.1 K 2024-11-14T19:53:20,125 DEBUG [M:0;867b237d0fa7:44661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d6a3c3f597249f29aaa33bf810f158c as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0d6a3c3f597249f29aaa33bf810f158c 2024-11-14T19:53:20,133 INFO [M:0;867b237d0fa7:44661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0d6a3c3f597249f29aaa33bf810f158c, entries=1, sequenceid=59, filesize=5.0 K 2024-11-14T19:53:20,135 DEBUG [M:0;867b237d0fa7:44661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b2f784e7f83543cab38b246740fafcd8 as hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b2f784e7f83543cab38b246740fafcd8 2024-11-14T19:53:20,143 INFO [M:0;867b237d0fa7:44661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b2f784e7f83543cab38b246740fafcd8, entries=1, sequenceid=59, filesize=4.9 K 2024-11-14T19:53:20,145 INFO [M:0;867b237d0fa7:44661 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 178ms, sequenceid=59, compaction requested=false 2024-11-14T19:53:20,147 INFO [M:0;867b237d0fa7:44661 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T19:53:20,147 DEBUG [M:0;867b237d0fa7:44661 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731613999967Disabling compacts and flushes for region at 1731613999967Disabling writes for close at 1731613999967Obtaining lock to block concurrent updates at 1731613999967Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731613999967Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731613999968 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731613999969 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731613999969Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731613999989 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731613999989Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731614000005 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731614000021 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731614000021Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731614000037 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731614000052 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731614000052Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731614000067 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731614000088 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731614000088Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ec3874d: reopening flushed file at 1731614000104 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@70cb8198: reopening flushed file at 1731614000114 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78ced8c1: reopening flushed file at 1731614000124 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f249fb8: reopening flushed file at 1731614000134 (+10 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 178ms, sequenceid=59, compaction requested=false at 1731614000145 (+11 ms)Writing region close event to WAL at 1731614000147 (+2 ms)Closed at 1731614000147 2024-11-14T19:53:20,148 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:20,148 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:20,148 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:20,148 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:20,148 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:20,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35407 is added to blk_1073741830_1006 (size=27973) 2024-11-14T19:53:20,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44629 is added to blk_1073741830_1006 (size=27973) 2024-11-14T19:53:20,152 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T19:53:20,152 INFO [M:0;867b237d0fa7:44661 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T19:53:20,152 INFO [M:0;867b237d0fa7:44661 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44661 2024-11-14T19:53:20,153 INFO [M:0;867b237d0fa7:44661 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:53:20,263 INFO [M:0;867b237d0fa7:44661 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:53:20,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:53:20,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44661-0x1013c14fd950000, quorum=127.0.0.1:54518, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:53:20,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77ee7cbe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:20,270 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25aef1db{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:53:20,270 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:53:20,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@626bfd2d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:53:20,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@644edc50{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/hadoop.log.dir/,STOPPED} 2024-11-14T19:53:20,273 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T19:53:20,273 WARN [BP-1923608106-172.17.0.2-1731613893194 heartbeating to localhost/127.0.0.1:38605 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:53:20,273 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:53:20,274 WARN [BP-1923608106-172.17.0.2-1731613893194 heartbeating to localhost/127.0.0.1:38605 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1923608106-172.17.0.2-1731613893194 (Datanode Uuid 4fa82d60-be63-499e-80ab-102485d2f75e) service to localhost/127.0.0.1:38605 2024-11-14T19:53:20,275 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/cluster_9a647fad-9f65-00ab-d065-18f3692b702a/data/data3/current/BP-1923608106-172.17.0.2-1731613893194 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:20,276 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/cluster_9a647fad-9f65-00ab-d065-18f3692b702a/data/data4/current/BP-1923608106-172.17.0.2-1731613893194 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:20,276 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:53:20,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@fa036b2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:20,279 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@19556135{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:53:20,279 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:53:20,279 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b81322e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:53:20,279 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79ee2b95{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/hadoop.log.dir/,STOPPED} 2024-11-14T19:53:20,281 WARN [BP-1923608106-172.17.0.2-1731613893194 heartbeating to localhost/127.0.0.1:38605 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:53:20,281 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T19:53:20,281 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:53:20,281 WARN [BP-1923608106-172.17.0.2-1731613893194 heartbeating to localhost/127.0.0.1:38605 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1923608106-172.17.0.2-1731613893194 (Datanode Uuid cf114025-70b5-480e-beb6-8a9111a10378) service to localhost/127.0.0.1:38605 2024-11-14T19:53:20,282 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/cluster_9a647fad-9f65-00ab-d065-18f3692b702a/data/data1/current/BP-1923608106-172.17.0.2-1731613893194 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:20,282 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/cluster_9a647fad-9f65-00ab-d065-18f3692b702a/data/data2/current/BP-1923608106-172.17.0.2-1731613893194 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:20,282 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:53:20,292 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27b94a93{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:53:20,293 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@d38cdad{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:53:20,293 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:53:20,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a7eb645{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:53:20,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47a28521{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/hadoop.log.dir/,STOPPED} 2024-11-14T19:53:20,305 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T19:53:20,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T19:53:20,351 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12) Potentially hanging thread: regionserver/867b237d0fa7:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38605 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:38605 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/867b237d0fa7:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38605 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:38605 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:38605 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/867b237d0fa7:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38605 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:38605 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38605 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@5e079503 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/867b237d0fa7:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=275 (was 379), ProcessCount=11 (was 11), AvailableMemoryMB=6129 (was 6314)
2024-11-14T19:53:20,360 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=275, ProcessCount=11, AvailableMemoryMB=6128
2024-11-14T19:53:20,360 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-14T19:53:20,360 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/hadoop.log.dir so I do NOT create it in target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7
2024-11-14T19:53:20,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9fbb0a6-2f81-8ee8-7117-e508e3e2ec35/hadoop.tmp.dir so I do NOT create it in target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7
2024-11-14T19:53:20,361 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/cluster_f9e9aa06-6380-8f93-87e4-7a4ad33e0a38, deleteOnExit=true
2024-11-14T19:53:20,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-14T19:53:20,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/test.cache.data in system properties and HBase conf
2024-11-14T19:53:20,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/hadoop.tmp.dir in system properties and HBase conf
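The `ResourceChecker` lines above bracket each test method with a before/after snapshot of process resources (Thread=78 (was 12), OpenFileDescriptor=402 (was 287), and so on), and the long "Potentially hanging thread" listing is simply the set of threads that survived the previous test, printed with their stack traces. As an illustration only (this is not HBase's ResourceChecker, just a minimal JDK-only sketch of the same before/after idea, with hypothetical class and method names):

```java
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
 * Illustrative sketch only -- not HBase's ResourceChecker. Snapshot the live
 * threads before a test, snapshot again afterwards, and print whatever is new,
 * together with its stack trace, in the same spirit as the dump above.
 */
public final class ThreadSnapshotCheck {
  private Set<Thread> before = new HashSet<>();

  public void beforeTest() {
    before = new HashSet<>(Thread.getAllStackTraces().keySet());
  }

  public void afterTest(String testName) {
    Map<Thread, StackTraceElement[]> now = Thread.getAllStackTraces();
    System.out.printf("after: %s Thread=%d (was %d)%n", testName, now.size(), before.size());
    for (Map.Entry<Thread, StackTraceElement[]> e : now.entrySet()) {
      if (before.contains(e.getKey())) {
        continue; // thread already existed before the test
      }
      System.out.println("Potentially hanging thread: " + e.getKey().getName());
      for (StackTraceElement frame : e.getValue()) {
        System.out.println("    " + frame);
      }
    }
  }
}
```

Most of the surviving threads in the dump are parked in `LinkedBlockingQueue.take()`, a timer sleep, or an epoll wait, i.e. idle pool workers rather than clear evidence of a leak, which is presumably why the report hedges with "Potentially hanging" and "Thread LEAK? -".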
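The `StartMiniClusterOption{numMasters=1, ..., numDataNodes=2, ..., numZkServers=1, ...}` dump above is the cluster shape the next test asks for. A minimal sketch of how a test would request it, assuming the builder methods mirror the field names printed in that dump (the exact `HBaseTestingUtil`/`StartMiniClusterOption` signatures in branch-3 may differ slightly):

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // One master, one region server, two datanodes, one ZK server --
    // the same shape as the StartMiniClusterOption logged above.
    // Builder method names are assumed from the field names in that dump.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);   // "Starting up minicluster with option: ..."
    try {
      // a test would exercise the cluster here
    } finally {
      util.shutdownMiniCluster();    // "Minicluster is down"
    }
  }
}
```

The "Setting ... in system properties and HBase conf" lines that follow are the test utility pointing every Hadoop, YARN, and HDFS scratch directory at the per-test data directory before the embedded DFS is started.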
2024-11-14T19:53:20,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/hadoop.log.dir in system properties and HBase conf
2024-11-14T19:53:20,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-14T19:53:20,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-14T19:53:20,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-14T19:53:20,361 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-14T19:53:20,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-14T19:53:20,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-14T19:53:20,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-14T19:53:20,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-14T19:53:20,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-14T19:53:20,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-14T19:53:20,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-14T19:53:20,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:53:20,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T19:53:20,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/nfs.dump.dir in system properties and HBase conf 2024-11-14T19:53:20,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/java.io.tmpdir in system properties and HBase conf 2024-11-14T19:53:20,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:53:20,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T19:53:20,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T19:53:20,376 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:53:20,648 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:53:20,655 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:53:20,656 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:53:20,656 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:53:20,657 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:53:20,657 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:53:20,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73805f68{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:53:20,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5740ec52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:53:20,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:53:20,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T19:53:20,670 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T19:53:20,671 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-14T19:53:20,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a757c24{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/java.io.tmpdir/jetty-localhost-43031-hadoop-hdfs-3_4_1-tests_jar-_-any-1506387088006237627/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:53:20,755 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38929dbc{HTTP/1.1, (http/1.1)}{localhost:43031} 2024-11-14T19:53:20,755 INFO [Time-limited test {}] server.Server(415): Started @112801ms 2024-11-14T19:53:20,768 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:53:20,946 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:53:20,951 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:53:20,952 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:53:20,952 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:53:20,952 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:53:20,953 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2898dd1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:53:20,953 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ccd91c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:53:21,048 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f10d386{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/java.io.tmpdir/jetty-localhost-33849-hadoop-hdfs-3_4_1-tests_jar-_-any-1980808682158858844/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:21,048 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6cbd40db{HTTP/1.1, (http/1.1)}{localhost:33849} 2024-11-14T19:53:21,048 INFO [Time-limited test {}] server.Server(415): Started @113094ms 2024-11-14T19:53:21,050 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:53:21,059 INFO [regionserver/867b237d0fa7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:53:21,087 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:53:21,091 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:53:21,092 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:53:21,092 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:53:21,092 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:53:21,092 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20113a17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:53:21,093 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@209002c9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:53:21,188 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1565fde7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/java.io.tmpdir/jetty-localhost-36741-hadoop-hdfs-3_4_1-tests_jar-_-any-11157251157736744787/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:21,188 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2a05573a{HTTP/1.1, (http/1.1)}{localhost:36741} 2024-11-14T19:53:21,188 INFO [Time-limited test {}] server.Server(415): Started @113234ms 2024-11-14T19:53:21,191 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:53:21,605 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/cluster_f9e9aa06-6380-8f93-87e4-7a4ad33e0a38/data/data1/current/BP-71556162-172.17.0.2-1731614000388/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:21,605 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/cluster_f9e9aa06-6380-8f93-87e4-7a4ad33e0a38/data/data2/current/BP-71556162-172.17.0.2-1731614000388/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:21,629 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T19:53:21,632 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3b32b663d86471f7 with lease ID 0xa066ee9b54c92ab1: Processing first storage report for DS-1ad74672-ee60-453e-86ff-11587ed8c906 from datanode DatanodeRegistration(127.0.0.1:34825, datanodeUuid=e99852e1-7463-4821-9aa3-44e19387ac78, infoPort=41875, infoSecurePort=0, ipcPort=41223, storageInfo=lv=-57;cid=testClusterID;nsid=1860348481;c=1731614000388) 2024-11-14T19:53:21,633 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b32b663d86471f7 with lease ID 0xa066ee9b54c92ab1: from storage DS-1ad74672-ee60-453e-86ff-11587ed8c906 node DatanodeRegistration(127.0.0.1:34825, datanodeUuid=e99852e1-7463-4821-9aa3-44e19387ac78, infoPort=41875, infoSecurePort=0, ipcPort=41223, storageInfo=lv=-57;cid=testClusterID;nsid=1860348481;c=1731614000388), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:21,633 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3b32b663d86471f7 with lease ID 0xa066ee9b54c92ab1: Processing first storage report for DS-7a30479b-2b7f-40be-a185-46e9fd5dd60c from datanode DatanodeRegistration(127.0.0.1:34825, datanodeUuid=e99852e1-7463-4821-9aa3-44e19387ac78, infoPort=41875, infoSecurePort=0, ipcPort=41223, storageInfo=lv=-57;cid=testClusterID;nsid=1860348481;c=1731614000388) 2024-11-14T19:53:21,633 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b32b663d86471f7 with lease ID 0xa066ee9b54c92ab1: from storage DS-7a30479b-2b7f-40be-a185-46e9fd5dd60c node DatanodeRegistration(127.0.0.1:34825, datanodeUuid=e99852e1-7463-4821-9aa3-44e19387ac78, infoPort=41875, infoSecurePort=0, ipcPort=41223, storageInfo=lv=-57;cid=testClusterID;nsid=1860348481;c=1731614000388), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:21,766 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/cluster_f9e9aa06-6380-8f93-87e4-7a4ad33e0a38/data/data3/current/BP-71556162-172.17.0.2-1731614000388/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:21,766 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/cluster_f9e9aa06-6380-8f93-87e4-7a4ad33e0a38/data/data4/current/BP-71556162-172.17.0.2-1731614000388/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:21,785 WARN [Thread-440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T19:53:21,787 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf4e541cc17b653cc with lease ID 0xa066ee9b54c92ab2: Processing first storage report for DS-b88c4887-b20e-4b96-a3a2-959a960a5fe6 from datanode DatanodeRegistration(127.0.0.1:33783, datanodeUuid=765c2a68-9523-4216-9a49-fe4f826bdfb1, infoPort=35203, infoSecurePort=0, ipcPort=43689, storageInfo=lv=-57;cid=testClusterID;nsid=1860348481;c=1731614000388) 2024-11-14T19:53:21,787 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf4e541cc17b653cc with lease ID 0xa066ee9b54c92ab2: from storage DS-b88c4887-b20e-4b96-a3a2-959a960a5fe6 node DatanodeRegistration(127.0.0.1:33783, datanodeUuid=765c2a68-9523-4216-9a49-fe4f826bdfb1, infoPort=35203, infoSecurePort=0, ipcPort=43689, storageInfo=lv=-57;cid=testClusterID;nsid=1860348481;c=1731614000388), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T19:53:21,788 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf4e541cc17b653cc with lease ID 0xa066ee9b54c92ab2: Processing first storage report for DS-f6f0ca04-ef59-4b35-9c01-722b7d4a2bb8 from datanode DatanodeRegistration(127.0.0.1:33783, datanodeUuid=765c2a68-9523-4216-9a49-fe4f826bdfb1, infoPort=35203, infoSecurePort=0, ipcPort=43689, storageInfo=lv=-57;cid=testClusterID;nsid=1860348481;c=1731614000388) 2024-11-14T19:53:21,788 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf4e541cc17b653cc with lease ID 0xa066ee9b54c92ab2: from storage DS-f6f0ca04-ef59-4b35-9c01-722b7d4a2bb8 node DatanodeRegistration(127.0.0.1:33783, datanodeUuid=765c2a68-9523-4216-9a49-fe4f826bdfb1, infoPort=35203, infoSecurePort=0, ipcPort=43689, storageInfo=lv=-57;cid=testClusterID;nsid=1860348481;c=1731614000388), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:21,827 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7 2024-11-14T19:53:21,843 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/cluster_f9e9aa06-6380-8f93-87e4-7a4ad33e0a38/zookeeper_0, clientPort=49678, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/cluster_f9e9aa06-6380-8f93-87e4-7a4ad33e0a38/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/cluster_f9e9aa06-6380-8f93-87e4-7a4ad33e0a38/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T19:53:21,844 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49678 2024-11-14T19:53:21,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:21,847 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:21,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:53:21,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:53:21,861 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07 with version=8 2024-11-14T19:53:21,862 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/hbase-staging 2024-11-14T19:53:21,864 INFO [Time-limited test {}] client.ConnectionUtils(128): master/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:53:21,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:21,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:21,864 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:53:21,865 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:21,865 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:53:21,865 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T19:53:21,865 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:53:21,866 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43165 2024-11-14T19:53:21,867 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43165 connecting to ZooKeeper ensemble=127.0.0.1:49678 2024-11-14T19:53:21,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:431650x0, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:53:21,907 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43165-0x1013c16905b0000 connected 2024-11-14T19:53:21,969 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:21,971 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:21,974 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:53:21,974 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07, hbase.cluster.distributed=false 2024-11-14T19:53:21,976 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:53:21,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43165 2024-11-14T19:53:21,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43165 2024-11-14T19:53:21,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43165 2024-11-14T19:53:21,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43165 2024-11-14T19:53:21,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43165 2024-11-14T19:53:21,996 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:53:21,996 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:21,996 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:21,996 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:53:21,997 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:21,997 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:53:21,997 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T19:53:21,997 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:53:21,998 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35175 2024-11-14T19:53:22,000 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35175 connecting to ZooKeeper ensemble=127.0.0.1:49678 2024-11-14T19:53:22,001 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:22,004 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:22,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:351750x0, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:53:22,012 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:351750x0, quorum=127.0.0.1:49678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:53:22,012 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35175-0x1013c16905b0001 connected 2024-11-14T19:53:22,012 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T19:53:22,013 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T19:53:22,013 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T19:53:22,014 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:53:22,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35175 2024-11-14T19:53:22,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35175 2024-11-14T19:53:22,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35175 2024-11-14T19:53:22,025 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35175 2024-11-14T19:53:22,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35175 2024-11-14T19:53:22,038 DEBUG [M:0;867b237d0fa7:43165 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;867b237d0fa7:43165 2024-11-14T19:53:22,038 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/867b237d0fa7,43165,1731614001864 2024-11-14T19:53:22,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:53:22,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:53:22,045 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/867b237d0fa7,43165,1731614001864 2024-11-14T19:53:22,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:22,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T19:53:22,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:22,053 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T19:53:22,054 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/867b237d0fa7,43165,1731614001864 from backup master directory 2024-11-14T19:53:22,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:53:22,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/867b237d0fa7,43165,1731614001864 2024-11-14T19:53:22,061 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
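The ZKUtil/ZKWatcher entries above repeatedly set watches on znodes such as /hbase/running and /hbase/master before they exist and then react to the NodeCreated/NodeDeleted events that follow. A minimal sketch of that pattern using the plain Apache ZooKeeper client (the ensemble address and znode path are copied from the log; everything else is illustrative and is not HBase's ZKUtil itself):

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchMissingZnode {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Ensemble address and session timeout mirror the log; values are illustrative.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:49678", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // exists() returns null when the znode is absent, but the watch is still registered,
    // so a later create of /hbase/master fires a one-shot NodeCreated event.
    zk.exists("/hbase/master", event -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        System.out.println("znode created: " + event.getPath());
      }
    });

    Thread.sleep(60_000); // keep the session alive long enough to observe the event
    zk.close();
  }
}
```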
2024-11-14T19:53:22,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:53:22,061 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=867b237d0fa7,43165,1731614001864 2024-11-14T19:53:22,067 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/hbase.id] with ID: 244ac68b-39a4-4866-b038-35e3acc1638e 2024-11-14T19:53:22,067 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/.tmp/hbase.id 2024-11-14T19:53:22,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:53:22,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:53:22,076 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/.tmp/hbase.id]:[hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/hbase.id] 2024-11-14T19:53:22,090 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:22,090 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T19:53:22,092 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
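The cluster ID file creation above follows a write-to-temporary-then-rename pattern: FSUtils writes .tmp/hbase.id first and only then moves it to its final location, so readers never observe a partially written file. A minimal sketch of the same pattern with the stock Hadoop FileSystem API; the namenode address matches the log, but the shortened paths are illustrative and this is not the actual FSUtils code:

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRename {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:33087"); // namenode from the log
    FileSystem fs = FileSystem.get(conf);

    // Simplified stand-ins for .../.tmp/hbase.id and .../hbase.id in the log.
    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
    Path dst = new Path("/user/jenkins/test-data/hbase.id");

    // Write the content to a temporary file first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("244ac68b-39a4-4866-b038-35e3acc1638e".getBytes(StandardCharsets.UTF_8));
    }
    // ...then move it into place in a single rename.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
    fs.close();
  }
}
```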
2024-11-14T19:53:22,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:22,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:22,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:53:22,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:53:22,111 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T19:53:22,112 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T19:53:22,112 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:53:22,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:53:22,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:53:22,124 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store 2024-11-14T19:53:22,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:53:22,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:53:22,133 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:53:22,133 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:53:22,134 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:22,134 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:22,134 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:53:22,134 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:22,134 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
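The master:store descriptor logged above spells out, per column family, the number of versions, bloom filter type, block size, data block encoding and in-memory flag. A hedged sketch of how a comparable descriptor could be assembled with the public HBase client builders; the table name here is a stand-in, since the real master:store local region is created internally by MasterRegion rather than through this API:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // 'info' family as logged: 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlocksize(8 * 1024)
        .build();

    // 'proc' family as logged: 1 version, ROW bloom, 64 KB blocks, default encoding.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();

    // Illustrative table name; the logged table is the internal 'master:store' region.
    TableDescriptor store = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();

    System.out.println(store);
  }
}
```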
2024-11-14T19:53:22,134 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731614002133Disabling compacts and flushes for region at 1731614002133Disabling writes for close at 1731614002134 (+1 ms)Writing region close event to WAL at 1731614002134Closed at 1731614002134 2024-11-14T19:53:22,135 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/.initializing 2024-11-14T19:53:22,135 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/WALs/867b237d0fa7,43165,1731614001864 2024-11-14T19:53:22,138 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C43165%2C1731614001864, suffix=, logDir=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/WALs/867b237d0fa7,43165,1731614001864, archiveDir=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/oldWALs, maxLogs=10 2024-11-14T19:53:22,139 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C43165%2C1731614001864.1731614002139 2024-11-14T19:53:22,145 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/WALs/867b237d0fa7,43165,1731614001864/867b237d0fa7%2C43165%2C1731614001864.1731614002139 2024-11-14T19:53:22,146 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35203:35203),(127.0.0.1/127.0.0.1:41875:41875)] 2024-11-14T19:53:22,146 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:53:22,147 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:53:22,147 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:22,147 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:22,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:22,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T19:53:22,151 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:22,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:22,152 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:22,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T19:53:22,153 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:22,154 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:53:22,154 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:22,157 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T19:53:22,158 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:22,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:53:22,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:22,160 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T19:53:22,160 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:22,161 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:53:22,161 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:22,162 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:22,163 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:22,164 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:22,164 DEBUG [master/867b237d0fa7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:22,165 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T19:53:22,166 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:22,169 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:53:22,169 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=796746, jitterRate=0.013115674257278442}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T19:53:22,170 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731614002147Initializing all the Stores at 1731614002148 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614002148Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614002149 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614002149Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614002149Cleaning up temporary data from old regions at 1731614002164 (+15 ms)Region opened successfully at 1731614002170 (+6 ms) 2024-11-14T19:53:22,170 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T19:53:22,175 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65876603, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:53:22,176 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T19:53:22,176 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T19:53:22,176 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T19:53:22,176 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T19:53:22,177 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T19:53:22,177 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T19:53:22,177 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T19:53:22,180 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T19:53:22,181 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T19:53:22,186 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T19:53:22,186 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T19:53:22,187 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T19:53:22,194 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T19:53:22,195 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T19:53:22,196 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T19:53:22,203 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T19:53:22,204 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T19:53:22,211 DEBUG 
[master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T19:53:22,216 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T19:53:22,226 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T19:53:22,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:53:22,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:53:22,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:22,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:22,237 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=867b237d0fa7,43165,1731614001864, sessionid=0x1013c16905b0000, setting cluster-up flag (Was=false) 2024-11-14T19:53:22,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:22,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:22,278 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T19:53:22,281 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,43165,1731614001864 2024-11-14T19:53:22,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:22,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:22,328 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T19:53:22,329 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,43165,1731614001864 2024-11-14T19:53:22,331 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T19:53:22,333 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T19:53:22,333 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T19:53:22,334 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T19:53:22,334 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 867b237d0fa7,43165,1731614001864 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T19:53:22,336 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:53:22,336 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:53:22,336 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:53:22,336 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:53:22,336 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/867b237d0fa7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T19:53:22,336 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:22,336 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:53:22,336 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T19:53:22,337 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731614032337 2024-11-14T19:53:22,337 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T19:53:22,337 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T19:53:22,337 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T19:53:22,337 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T19:53:22,337 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T19:53:22,337 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T19:53:22,338 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:22,338 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T19:53:22,338 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T19:53:22,338 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T19:53:22,339 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:53:22,339 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T19:53:22,340 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:22,341 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T19:53:22,343 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T19:53:22,343 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T19:53:22,344 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614002344,5,FailOnTimeoutGroup] 2024-11-14T19:53:22,344 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614002344,5,FailOnTimeoutGroup] 2024-11-14T19:53:22,344 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:22,344 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T19:53:22,344 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:22,344 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
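The table descriptor printed above is built internally by FSTableDescriptors for hbase:meta. For comparison only, a user table with similar per-family attributes (ROWCOL bloom filter, ROW_INDEX_V1 block encoding, in-memory caching, 8 KB blocks, 3 versions) could be declared through the public client builder API roughly as sketched below; the table name `demo` and the single `info` family are illustrative and not taken from this test.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class DemoTableDescriptor {
  public static TableDescriptor build() {
    // Mirrors the per-family attributes logged for hbase:meta above.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
            .build())
        .build();
  }
}
```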
2024-11-14T19:53:22,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:53:22,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:53:22,351 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T19:53:22,352 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07 2024-11-14T19:53:22,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:53:22,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:53:22,363 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:53:22,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:53:22,367 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:53:22,367 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:22,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:22,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:53:22,370 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:53:22,371 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:22,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:22,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:53:22,374 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:53:22,374 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:22,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:22,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:53:22,377 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:53:22,377 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:22,378 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:22,378 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:53:22,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/data/hbase/meta/1588230740 2024-11-14T19:53:22,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/data/hbase/meta/1588230740 2024-11-14T19:53:22,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:53:22,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:53:22,382 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
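The CompactionConfiguration entries above echo stock defaults (min 3 / max 10 files per compaction, ratio 1.2, off-peak ratio 5.0, 128 MB minimum compact size). A minimal sketch of how those knobs are normally set through site configuration follows; the values are just the defaults already shown in the log, and the keys are the standard hbase.hstore.compaction.* settings rather than anything specific to this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionTuning {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    // Matches minFilesToCompact:3 / maxFilesToCompact:10 in the log above.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Matches ratio 1.200000 and off-peak ratio 5.000000.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // Matches minCompactSize:128 MB.
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    return conf;
  }
}
```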
2024-11-14T19:53:22,383 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:53:22,386 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:53:22,387 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757942, jitterRate=-0.0362270325422287}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:53:22,388 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731614002363Initializing all the Stores at 1731614002365 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614002365Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614002365Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614002365Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614002365Cleaning up temporary data from old regions at 1731614002381 (+16 ms)Region opened successfully at 1731614002388 (+7 ms) 2024-11-14T19:53:22,388 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:53:22,389 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:53:22,389 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:53:22,389 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T19:53:22,389 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T19:53:22,389 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:53:22,390 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731614002388Disabling compacts and flushes for region at 1731614002388Disabling writes for close at 1731614002389 (+1 ms)Writing 
region close event to WAL at 1731614002389Closed at 1731614002389 2024-11-14T19:53:22,391 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:53:22,392 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T19:53:22,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T19:53:22,394 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:53:22,395 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T19:53:22,428 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(746): ClusterId : 244ac68b-39a4-4866-b038-35e3acc1638e 2024-11-14T19:53:22,428 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T19:53:22,437 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T19:53:22,437 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T19:53:22,445 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T19:53:22,446 DEBUG [RS:0;867b237d0fa7:35175 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e448cca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:53:22,457 DEBUG [RS:0;867b237d0fa7:35175 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;867b237d0fa7:35175 2024-11-14T19:53:22,457 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T19:53:22,457 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T19:53:22,457 DEBUG [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T19:53:22,458 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(2659): reportForDuty to master=867b237d0fa7,43165,1731614001864 with port=35175, startcode=1731614001996 2024-11-14T19:53:22,459 DEBUG [RS:0;867b237d0fa7:35175 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T19:53:22,461 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47433, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T19:53:22,462 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43165 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 867b237d0fa7,35175,1731614001996 2024-11-14T19:53:22,462 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43165 {}] master.ServerManager(517): Registering regionserver=867b237d0fa7,35175,1731614001996 2024-11-14T19:53:22,465 DEBUG [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07 2024-11-14T19:53:22,465 DEBUG [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33087 2024-11-14T19:53:22,465 DEBUG [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T19:53:22,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:53:22,470 DEBUG [RS:0;867b237d0fa7:35175 {}] zookeeper.ZKUtil(111): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/867b237d0fa7,35175,1731614001996 2024-11-14T19:53:22,470 WARN [RS:0;867b237d0fa7:35175 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T19:53:22,470 INFO [RS:0;867b237d0fa7:35175 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:53:22,470 DEBUG [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/WALs/867b237d0fa7,35175,1731614001996 2024-11-14T19:53:22,470 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [867b237d0fa7,35175,1731614001996] 2024-11-14T19:53:22,474 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T19:53:22,477 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T19:53:22,478 INFO [RS:0;867b237d0fa7:35175 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T19:53:22,478 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
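The MemStoreFlusher figures above (globalMemStoreLimit=880 M, lower mark 836 M) are derived from the region server heap multiplied by the global memstore fractions, and the 100 MB/s / 50 MB/s bounds belong to PressureAwareCompactionThroughputController. A rough configuration sketch, assuming the usual keys for both; the fractions shown are the shipped defaults, not values read from this test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class RegionServerMemoryTuning {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    // Heap fractions that produce the 880 M / 836 M limits logged above.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Compaction throughput bounds (bytes/sec) used by the pressure-aware controller.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}
```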
2024-11-14T19:53:22,478 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T19:53:22,480 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T19:53:22,480 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:22,480 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:22,480 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:22,480 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:22,481 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:22,481 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:22,481 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:53:22,481 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:22,481 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:22,481 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:22,481 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:22,481 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:22,482 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:22,482 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:53:22,482 DEBUG [RS:0;867b237d0fa7:35175 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:53:22,482 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
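Every "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." line above is emitted when ChoreService schedules a chore. A minimal sketch of a custom chore using the ScheduledChore and ChoreService classes named in the log; note these are internal (non-public-API) classes, and the constructor shapes below are written from memory of the 2.x/3.x code, so treat them as assumptions.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class HeartbeatChoreDemo {
  public static void main(String[] args) throws InterruptedException {
    // Simple Stoppable so the chore can be cancelled cooperatively.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ScheduledChore heartbeat = new ScheduledChore("HeartbeatChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");    // runs once per period (1000 ms here)
      }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(heartbeat);        // produces the same "is enabled" log line as above
    Thread.sleep(3000);
    stopper.stop("done");
    service.shutdown();
  }
}
```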
2024-11-14T19:53:22,483 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:22,483 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:22,483 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:22,483 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:22,483 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,35175,1731614001996-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:53:22,499 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T19:53:22,499 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,35175,1731614001996-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:22,499 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:22,499 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.Replication(171): 867b237d0fa7,35175,1731614001996 started 2024-11-14T19:53:22,513 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:22,513 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(1482): Serving as 867b237d0fa7,35175,1731614001996, RpcServer on 867b237d0fa7/172.17.0.2:35175, sessionid=0x1013c16905b0001 2024-11-14T19:53:22,514 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T19:53:22,514 DEBUG [RS:0;867b237d0fa7:35175 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 867b237d0fa7,35175,1731614001996 2024-11-14T19:53:22,514 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,35175,1731614001996' 2024-11-14T19:53:22,514 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T19:53:22,515 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T19:53:22,515 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T19:53:22,515 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T19:53:22,515 DEBUG [RS:0;867b237d0fa7:35175 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 867b237d0fa7,35175,1731614001996 2024-11-14T19:53:22,515 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,35175,1731614001996' 2024-11-14T19:53:22,515 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T19:53:22,516 DEBUG 
[RS:0;867b237d0fa7:35175 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T19:53:22,517 DEBUG [RS:0;867b237d0fa7:35175 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T19:53:22,517 INFO [RS:0;867b237d0fa7:35175 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T19:53:22,517 INFO [RS:0;867b237d0fa7:35175 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T19:53:22,546 WARN [867b237d0fa7:43165 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T19:53:22,622 INFO [RS:0;867b237d0fa7:35175 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C35175%2C1731614001996, suffix=, logDir=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/WALs/867b237d0fa7,35175,1731614001996, archiveDir=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/oldWALs, maxLogs=32 2024-11-14T19:53:22,625 INFO [RS:0;867b237d0fa7:35175 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C35175%2C1731614001996.1731614002625 2024-11-14T19:53:22,633 INFO [RS:0;867b237d0fa7:35175 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/WALs/867b237d0fa7,35175,1731614001996/867b237d0fa7%2C35175%2C1731614001996.1731614002625 2024-11-14T19:53:22,635 DEBUG [RS:0;867b237d0fa7:35175 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35203:35203),(127.0.0.1/127.0.0.1:41875:41875)] 2024-11-14T19:53:22,796 DEBUG [867b237d0fa7:43165 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T19:53:22,797 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=867b237d0fa7,35175,1731614001996 2024-11-14T19:53:22,799 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,35175,1731614001996, state=OPENING 2024-11-14T19:53:22,834 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T19:53:22,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:22,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:22,845 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:53:22,845 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:53:22,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=867b237d0fa7,35175,1731614001996}] 2024-11-14T19:53:22,846 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:53:22,999 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T19:53:23,002 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37153, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T19:53:23,007 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T19:53:23,007 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:53:23,010 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C35175%2C1731614001996.meta, suffix=.meta, logDir=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/WALs/867b237d0fa7,35175,1731614001996, archiveDir=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/oldWALs, maxLogs=32 2024-11-14T19:53:23,012 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C35175%2C1731614001996.meta.1731614003012.meta 2024-11-14T19:53:23,020 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/WALs/867b237d0fa7,35175,1731614001996/867b237d0fa7%2C35175%2C1731614001996.meta.1731614003012.meta 2024-11-14T19:53:23,021 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35203:35203),(127.0.0.1/127.0.0.1:41875:41875)] 2024-11-14T19:53:23,022 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:53:23,023 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T19:53:23,023 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T19:53:23,024 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
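The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above come from the FSHLog provider's roll settings. A hedged sketch, assuming the usual keys (hbase.wal.provider, hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs) and that rollsize is the block size times the roll multiplier:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class WalTuning {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");                           // FSHLogProvider, as logged above
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);  // blocksize=256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);           // rollsize = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                          // maxLogs=32
    return conf;
  }
}
```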
2024-11-14T19:53:23,024 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T19:53:23,024 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:53:23,024 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T19:53:23,024 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T19:53:23,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:53:23,028 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:53:23,028 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:23,029 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:23,029 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:53:23,030 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:53:23,030 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:23,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:23,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:53:23,032 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:53:23,032 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:23,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:23,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:53:23,034 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:53:23,034 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:23,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
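Each "Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, ..." line above is the per-family CacheConfig built when a store opens. Those flags can be controlled per column family through the descriptor builder; a small sketch with an illustrative `info` family (not a change to hbase:meta itself):

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class FamilyCacheSettings {
  public static ColumnFamilyDescriptor info() {
    // Mirrors the cacheConfig flags printed for each family of region 1588230740.
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setBlockCacheEnabled(true)         // cacheDataOnRead=true
        .setCacheDataOnWrite(false)
        .setCacheIndexesOnWrite(false)
        .setCacheBloomsOnWrite(false)
        .setEvictBlocksOnClose(false)
        .setPrefetchBlocksOnOpen(false)
        .build();
  }
}
```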
2024-11-14T19:53:23,035 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:53:23,036 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/data/hbase/meta/1588230740 2024-11-14T19:53:23,038 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/data/hbase/meta/1588230740 2024-11-14T19:53:23,039 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:53:23,039 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:53:23,040 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T19:53:23,042 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:53:23,043 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=756880, jitterRate=-0.03757840394973755}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:53:23,043 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T19:53:23,044 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731614003024Writing region info on filesystem at 1731614003024Initializing all the Stores at 1731614003026 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614003026Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614003026Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614003026Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614003027 (+1 ms)Cleaning up temporary data from old regions at 1731614003039 (+12 ms)Running coprocessor post-open hooks at 1731614003043 (+4 ms)Region opened successfully at 1731614003044 (+1 ms) 2024-11-14T19:53:23,046 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731614002999 2024-11-14T19:53:23,049 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T19:53:23,050 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T19:53:23,051 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=867b237d0fa7,35175,1731614001996 2024-11-14T19:53:23,052 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,35175,1731614001996, state=OPEN 2024-11-14T19:53:23,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:53:23,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:53:23,082 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:53:23,082 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:53:23,082 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=867b237d0fa7,35175,1731614001996 2024-11-14T19:53:23,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T19:53:23,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=867b237d0fa7,35175,1731614001996 in 238 msec 2024-11-14T19:53:23,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T19:53:23,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 696 msec 2024-11-14T19:53:23,095 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:53:23,095 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T19:53:23,098 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:53:23,098 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,35175,1731614001996, seqNum=-1] 2024-11-14T19:53:23,099 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:53:23,101 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40517, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:53:23,128 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 793 msec 2024-11-14T19:53:23,131 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731614003131, completionTime=-1 2024-11-14T19:53:23,132 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T19:53:23,132 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T19:53:23,135 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T19:53:23,136 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731614063136 2024-11-14T19:53:23,136 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731614123136 2024-11-14T19:53:23,136 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 4 msec 2024-11-14T19:53:23,136 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,43165,1731614001864-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:23,137 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,43165,1731614001864-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:23,137 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,43165,1731614001864-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:23,137 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-867b237d0fa7:43165, period=300000, unit=MILLISECONDS is enabled. 
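InitMetaProcedure above creates the built-in `default` and `hbase` namespaces. From the client side, namespaces are managed through the Admin API; a minimal sketch that adds one more namespace and lists them against a running cluster (the `demo_ns` name is illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class NamespaceDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // "default" and "hbase" already exist after InitMetaProcedure; add one more.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}
```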
2024-11-14T19:53:23,137 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:23,138 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:23,142 DEBUG [master/867b237d0fa7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T19:53:23,151 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.090sec 2024-11-14T19:53:23,152 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T19:53:23,152 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T19:53:23,152 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T19:53:23,152 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T19:53:23,152 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T19:53:23,152 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,43165,1731614001864-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:53:23,152 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,43165,1731614001864-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T19:53:23,156 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T19:53:23,156 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T19:53:23,156 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,43165,1731614001864-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
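"Quota support disabled" above means hbase.quota.enabled is left at its default of false. A rough sketch of turning quotas on and installing a user throttle; the cluster-side switch normally lives in hbase-site.xml rather than client code, and the QuotaSettingsFactory call shape here is from memory and should be checked against the running version.

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public final class QuotaDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Cluster-side switch; must be set in the server's hbase-site.xml to take effect.
    conf.setBoolean("hbase.quota.enabled", true);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Illustrative throttle: cap user "jenkins" at 100 requests per second.
      admin.setQuota(QuotaSettingsFactory.throttleUser(
          "jenkins", ThrottleType.REQUEST_NUMBER, 100, TimeUnit.SECONDS));
    }
  }
}
```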
2024-11-14T19:53:23,229 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75c249c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:53:23,229 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 867b237d0fa7,43165,-1 for getting cluster id 2024-11-14T19:53:23,229 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T19:53:23,231 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '244ac68b-39a4-4866-b038-35e3acc1638e' 2024-11-14T19:53:23,233 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T19:53:23,233 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "244ac68b-39a4-4866-b038-35e3acc1638e" 2024-11-14T19:53:23,234 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cf61931, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:53:23,234 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [867b237d0fa7,43165,-1] 2024-11-14T19:53:23,234 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T19:53:23,235 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:53:23,239 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60096, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T19:53:23,241 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40637190, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:53:23,242 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:53:23,255 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,35175,1731614001996, seqNum=-1] 2024-11-14T19:53:23,255 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:53:23,267 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50568, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:53:23,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=867b237d0fa7,43165,1731614001864 2024-11-14T19:53:23,272 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:23,278 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T19:53:23,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T19:53:23,278 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T19:53:23,279 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:53:23,279 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:53:23,279 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T19:53:23,279 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:53:23,279 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T19:53:23,280 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1353930573, stopped=false 2024-11-14T19:53:23,280 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=867b237d0fa7,43165,1731614001864 2024-11-14T19:53:23,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T19:53:23,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:23,293 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T19:53:23,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T19:53:23,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:23,293 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
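[Editor's note] The call stacks above (and the one that follows) bottom out in AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster, i.e. the standard JUnit tear-down for these minicluster tests. A hedged sketch of that wiring, assuming the no-arg HBaseTestingUtil constructor and no-arg startMiniCluster(); the class and field names are illustrative, not the actual AbstractTestLogRolling code.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class ExampleMiniClusterTest {
  // Shared test harness; the real tests keep one instance per test class.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Spins up HDFS, ZooKeeper, one master and one region server, as recorded earlier in this log.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" sequence above: closes the async connection,
    // stops the master and region servers, then the DataNodes and the mini ZooKeeper quorum.
    testUtil.shutdownMiniCluster();
  }
}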
2024-11-14T19:53:23,294 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:53:23,294 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:53:23,294 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:53:23,294 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '867b237d0fa7,35175,1731614001996' ***** 2024-11-14T19:53:23,295 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:53:23,295 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T19:53:23,295 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T19:53:23,295 INFO [RS:0;867b237d0fa7:35175 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T19:53:23,295 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T19:53:23,295 INFO [RS:0;867b237d0fa7:35175 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T19:53:23,295 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(959): stopping server 867b237d0fa7,35175,1731614001996 2024-11-14T19:53:23,295 INFO [RS:0;867b237d0fa7:35175 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T19:53:23,295 INFO [RS:0;867b237d0fa7:35175 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;867b237d0fa7:35175. 2024-11-14T19:53:23,295 DEBUG [RS:0;867b237d0fa7:35175 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:53:23,295 DEBUG [RS:0;867b237d0fa7:35175 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:53:23,296 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-14T19:53:23,296 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T19:53:23,296 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T19:53:23,296 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T19:53:23,296 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T19:53:23,296 DEBUG [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T19:53:23,296 DEBUG [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T19:53:23,296 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:53:23,296 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:53:23,297 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:53:23,297 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T19:53:23,297 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T19:53:23,297 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T19:53:23,326 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/data/hbase/meta/1588230740/.tmp/ns/f819229d567b48458c8db0e165419088 is 43, key is default/ns:d/1731614003102/Put/seqid=0 2024-11-14T19:53:23,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741835_1011 (size=5153) 2024-11-14T19:53:23,345 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/data/hbase/meta/1588230740/.tmp/ns/f819229d567b48458c8db0e165419088 2024-11-14T19:53:23,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741835_1011 (size=5153) 2024-11-14T19:53:23,355 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/data/hbase/meta/1588230740/.tmp/ns/f819229d567b48458c8db0e165419088 as hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/data/hbase/meta/1588230740/ns/f819229d567b48458c8db0e165419088 2024-11-14T19:53:23,370 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/data/hbase/meta/1588230740/ns/f819229d567b48458c8db0e165419088, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T19:53:23,372 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 75ms, sequenceid=6, compaction requested=false 2024-11-14T19:53:23,372 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T19:53:23,386 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T19:53:23,387 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:53:23,387 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:53:23,388 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731614003296Running coprocessor pre-close hooks at 1731614003296Disabling compacts and flushes for region at 1731614003296Disabling writes for close at 1731614003297 (+1 ms)Obtaining lock to block concurrent updates at 1731614003297Preparing flush snapshotting stores in 1588230740 at 1731614003297Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731614003298 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731614003299 (+1 ms)Flushing 1588230740/ns: creating writer at 1731614003299Flushing 1588230740/ns: appending metadata at 1731614003326 (+27 ms)Flushing 1588230740/ns: closing flushed file at 1731614003326Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@264c97e5: reopening flushed file at 1731614003354 (+28 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 75ms, sequenceid=6, compaction requested=false at 1731614003372 (+18 ms)Writing region close event to WAL at 1731614003380 (+8 ms)Running coprocessor post-close hooks at 1731614003387 (+7 ms)Closed at 1731614003387 2024-11-14T19:53:23,388 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T19:53:23,483 INFO [regionserver/867b237d0fa7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T19:53:23,483 INFO [regionserver/867b237d0fa7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T19:53:23,497 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(976): stopping server 867b237d0fa7,35175,1731614001996; all regions closed. 
2024-11-14T19:53:23,497 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,497 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,498 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,498 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,498 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741834_1010 (size=1152) 2024-11-14T19:53:23,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741834_1010 (size=1152) 2024-11-14T19:53:23,504 DEBUG [RS:0;867b237d0fa7:35175 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/oldWALs 2024-11-14T19:53:23,504 INFO [RS:0;867b237d0fa7:35175 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C35175%2C1731614001996.meta:.meta(num 1731614003012) 2024-11-14T19:53:23,504 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,504 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,504 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,504 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,505 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741833_1009 (size=93) 2024-11-14T19:53:23,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741833_1009 (size=93) 2024-11-14T19:53:23,509 DEBUG [RS:0;867b237d0fa7:35175 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/oldWALs 2024-11-14T19:53:23,509 INFO [RS:0;867b237d0fa7:35175 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C35175%2C1731614001996:(num 1731614002625) 2024-11-14T19:53:23,509 DEBUG [RS:0;867b237d0fa7:35175 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:53:23,509 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:53:23,509 INFO [RS:0;867b237d0fa7:35175 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:53:23,509 INFO [RS:0;867b237d0fa7:35175 {}] hbase.ChoreService(370): Chore service for: regionserver/867b237d0fa7:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T19:53:23,510 INFO [RS:0;867b237d0fa7:35175 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:53:23,510 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T19:53:23,510 INFO [RS:0;867b237d0fa7:35175 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35175 2024-11-14T19:53:23,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/867b237d0fa7,35175,1731614001996 2024-11-14T19:53:23,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:53:23,519 INFO [RS:0;867b237d0fa7:35175 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:53:23,528 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [867b237d0fa7,35175,1731614001996] 2024-11-14T19:53:23,536 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/867b237d0fa7,35175,1731614001996 already deleted, retry=false 2024-11-14T19:53:23,536 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 867b237d0fa7,35175,1731614001996 expired; onlineServers=0 2024-11-14T19:53:23,536 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '867b237d0fa7,43165,1731614001864' ***** 2024-11-14T19:53:23,536 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T19:53:23,536 INFO [M:0;867b237d0fa7:43165 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T19:53:23,536 INFO [M:0;867b237d0fa7:43165 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:53:23,537 DEBUG [M:0;867b237d0fa7:43165 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T19:53:23,537 DEBUG [M:0;867b237d0fa7:43165 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T19:53:23,537 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T19:53:23,537 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614002344 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614002344,5,FailOnTimeoutGroup] 2024-11-14T19:53:23,537 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614002344 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614002344,5,FailOnTimeoutGroup] 2024-11-14T19:53:23,537 INFO [M:0;867b237d0fa7:43165 {}] hbase.ChoreService(370): Chore service for: master/867b237d0fa7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T19:53:23,537 INFO [M:0;867b237d0fa7:43165 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:53:23,537 DEBUG [M:0;867b237d0fa7:43165 {}] master.HMaster(1795): Stopping service threads 2024-11-14T19:53:23,537 INFO [M:0;867b237d0fa7:43165 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T19:53:23,537 INFO [M:0;867b237d0fa7:43165 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T19:53:23,538 INFO [M:0;867b237d0fa7:43165 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T19:53:23,538 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T19:53:23,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T19:53:23,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:23,544 DEBUG [M:0;867b237d0fa7:43165 {}] zookeeper.ZKUtil(347): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T19:53:23,544 WARN [M:0;867b237d0fa7:43165 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T19:53:23,545 INFO [M:0;867b237d0fa7:43165 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/.lastflushedseqids 2024-11-14T19:53:23,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741836_1012 (size=99) 2024-11-14T19:53:23,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741836_1012 (size=99) 2024-11-14T19:53:23,561 INFO [M:0;867b237d0fa7:43165 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T19:53:23,561 INFO [M:0;867b237d0fa7:43165 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T19:53:23,561 DEBUG [M:0;867b237d0fa7:43165 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:53:23,561 INFO [M:0;867b237d0fa7:43165 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:23,561 DEBUG [M:0;867b237d0fa7:43165 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:23,561 DEBUG [M:0;867b237d0fa7:43165 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:53:23,561 DEBUG [M:0;867b237d0fa7:43165 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:23,562 INFO [M:0;867b237d0fa7:43165 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-14T19:53:23,584 DEBUG [M:0;867b237d0fa7:43165 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2f6980349f10429aadac879bfc6467d2 is 82, key is hbase:meta,,1/info:regioninfo/1731614003051/Put/seqid=0 2024-11-14T19:53:23,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741837_1013 (size=5672) 2024-11-14T19:53:23,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741837_1013 (size=5672) 2024-11-14T19:53:23,592 INFO [M:0;867b237d0fa7:43165 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2f6980349f10429aadac879bfc6467d2 2024-11-14T19:53:23,616 DEBUG [M:0;867b237d0fa7:43165 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ba70a36a901c48769581f7925a7ea97a is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731614003126/Put/seqid=0 2024-11-14T19:53:23,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:53:23,628 INFO [RS:0;867b237d0fa7:35175 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:53:23,628 INFO [RS:0;867b237d0fa7:35175 {}] regionserver.HRegionServer(1031): Exiting; stopping=867b237d0fa7,35175,1731614001996; zookeeper connection closed. 
2024-11-14T19:53:23,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35175-0x1013c16905b0001, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:53:23,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741838_1014 (size=5275) 2024-11-14T19:53:23,642 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2533f619 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2533f619 2024-11-14T19:53:23,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741838_1014 (size=5275) 2024-11-14T19:53:23,642 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T19:53:23,642 INFO [M:0;867b237d0fa7:43165 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ba70a36a901c48769581f7925a7ea97a 2024-11-14T19:53:23,681 DEBUG [M:0;867b237d0fa7:43165 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0df7b1b003404718ad068fbd940ce452 is 69, key is 867b237d0fa7,35175,1731614001996/rs:state/1731614002463/Put/seqid=0 2024-11-14T19:53:23,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741839_1015 (size=5156) 2024-11-14T19:53:23,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741839_1015 (size=5156) 2024-11-14T19:53:23,699 INFO [M:0;867b237d0fa7:43165 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0df7b1b003404718ad068fbd940ce452 2024-11-14T19:53:23,732 DEBUG [M:0;867b237d0fa7:43165 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/356eb425be5b4e398a8dc38995000638 is 52, key is load_balancer_on/state:d/1731614003275/Put/seqid=0 2024-11-14T19:53:23,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741840_1016 (size=5056) 2024-11-14T19:53:23,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741840_1016 (size=5056) 2024-11-14T19:53:23,750 INFO [M:0;867b237d0fa7:43165 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/356eb425be5b4e398a8dc38995000638 2024-11-14T19:53:23,766 DEBUG [M:0;867b237d0fa7:43165 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2f6980349f10429aadac879bfc6467d2 as hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2f6980349f10429aadac879bfc6467d2 2024-11-14T19:53:23,774 INFO [M:0;867b237d0fa7:43165 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2f6980349f10429aadac879bfc6467d2, entries=8, sequenceid=29, filesize=5.5 K 2024-11-14T19:53:23,776 DEBUG [M:0;867b237d0fa7:43165 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ba70a36a901c48769581f7925a7ea97a as hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ba70a36a901c48769581f7925a7ea97a 2024-11-14T19:53:23,788 INFO [M:0;867b237d0fa7:43165 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ba70a36a901c48769581f7925a7ea97a, entries=3, sequenceid=29, filesize=5.2 K 2024-11-14T19:53:23,789 DEBUG [M:0;867b237d0fa7:43165 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0df7b1b003404718ad068fbd940ce452 as hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0df7b1b003404718ad068fbd940ce452 2024-11-14T19:53:23,810 INFO [M:0;867b237d0fa7:43165 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0df7b1b003404718ad068fbd940ce452, entries=1, sequenceid=29, filesize=5.0 K 2024-11-14T19:53:23,813 DEBUG [M:0;867b237d0fa7:43165 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/356eb425be5b4e398a8dc38995000638 as hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/356eb425be5b4e398a8dc38995000638 2024-11-14T19:53:23,821 INFO [M:0;867b237d0fa7:43165 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33087/user/jenkins/test-data/49f6d915-e00c-f310-2fbe-8ed1f24bcd07/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/356eb425be5b4e398a8dc38995000638, entries=1, sequenceid=29, filesize=4.9 K 2024-11-14T19:53:23,822 INFO [M:0;867b237d0fa7:43165 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 261ms, sequenceid=29, compaction requested=false 2024-11-14T19:53:23,835 INFO [M:0;867b237d0fa7:43165 {}] regionserver.HRegion(1973): Closed 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:23,835 DEBUG [M:0;867b237d0fa7:43165 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731614003561Disabling compacts and flushes for region at 1731614003561Disabling writes for close at 1731614003561Obtaining lock to block concurrent updates at 1731614003562 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731614003562Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731614003562Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731614003563 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731614003563Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731614003584 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731614003584Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731614003599 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731614003615 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731614003615Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731614003657 (+42 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731614003680 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731614003680Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731614003708 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731614003731 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731614003731Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@696e74af: reopening flushed file at 1731614003764 (+33 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5070f19b: reopening flushed file at 1731614003775 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@469aa386: reopening flushed file at 1731614003788 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@89d4036: reopening flushed file at 1731614003811 (+23 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 261ms, sequenceid=29, compaction requested=false at 1731614003822 (+11 ms)Writing region close event to WAL at 1731614003835 (+13 ms)Closed at 1731614003835 2024-11-14T19:53:23,836 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,836 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,836 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,836 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,837 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:23,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34825 is added to blk_1073741830_1006 (size=10311) 2024-11-14T19:53:23,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33783 is added to blk_1073741830_1006 (size=10311) 2024-11-14T19:53:23,859 INFO [M:0;867b237d0fa7:43165 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-14T19:53:23,859 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T19:53:23,859 INFO [M:0;867b237d0fa7:43165 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43165 2024-11-14T19:53:23,860 INFO [M:0;867b237d0fa7:43165 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:53:23,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:53:23,993 INFO [M:0;867b237d0fa7:43165 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:53:23,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43165-0x1013c16905b0000, quorum=127.0.0.1:49678, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:53:23,996 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1565fde7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:23,997 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a05573a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:53:23,997 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:53:23,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@209002c9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:53:23,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20113a17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/hadoop.log.dir/,STOPPED} 2024-11-14T19:53:23,998 WARN [BP-71556162-172.17.0.2-1731614000388 heartbeating to localhost/127.0.0.1:33087 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:53:23,998 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T19:53:23,999 WARN [BP-71556162-172.17.0.2-1731614000388 heartbeating to localhost/127.0.0.1:33087 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-71556162-172.17.0.2-1731614000388 (Datanode Uuid 765c2a68-9523-4216-9a49-fe4f826bdfb1) service to localhost/127.0.0.1:33087 2024-11-14T19:53:23,999 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:53:23,999 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/cluster_f9e9aa06-6380-8f93-87e4-7a4ad33e0a38/data/data3/current/BP-71556162-172.17.0.2-1731614000388 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:24,000 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/cluster_f9e9aa06-6380-8f93-87e4-7a4ad33e0a38/data/data4/current/BP-71556162-172.17.0.2-1731614000388 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:24,000 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:53:24,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f10d386{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:24,012 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6cbd40db{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:53:24,012 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:53:24,012 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ccd91c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:53:24,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2898dd1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/hadoop.log.dir/,STOPPED} 2024-11-14T19:53:24,015 WARN [BP-71556162-172.17.0.2-1731614000388 heartbeating to localhost/127.0.0.1:33087 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:53:24,015 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
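[Editor's note] A little further on, once the second DataNode is down and "Minicluster is down" is logged, the harness immediately starts a fresh minicluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}. A hedged sketch of how a test typically requests that topology, assuming the StartMiniClusterOption.Builder methods match the field names printed in the log; the scenario comment is illustrative.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class RestartSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the option string logged below: 1 master, 1 region server,
    // 2 DataNodes, 1 ZooKeeper server, no pre-created root or WAL dir.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);
    // ... run the scenario (e.g. kill a DataNode and verify the WAL rolls) ...
    util.shutdownMiniCluster();
  }
}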
2024-11-14T19:53:24,015 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:53:24,015 WARN [BP-71556162-172.17.0.2-1731614000388 heartbeating to localhost/127.0.0.1:33087 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-71556162-172.17.0.2-1731614000388 (Datanode Uuid e99852e1-7463-4821-9aa3-44e19387ac78) service to localhost/127.0.0.1:33087 2024-11-14T19:53:24,016 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/cluster_f9e9aa06-6380-8f93-87e4-7a4ad33e0a38/data/data1/current/BP-71556162-172.17.0.2-1731614000388 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:24,016 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/cluster_f9e9aa06-6380-8f93-87e4-7a4ad33e0a38/data/data2/current/BP-71556162-172.17.0.2-1731614000388 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:24,017 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:53:24,025 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a757c24{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:53:24,026 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38929dbc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:53:24,026 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:53:24,029 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5740ec52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:53:24,029 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73805f68{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/hadoop.log.dir/,STOPPED} 2024-11-14T19:53:24,041 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T19:53:24,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T19:53:24,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T19:53:24,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/hadoop.log.dir so I do NOT create it in target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc 2024-11-14T19:53:24,066 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/77b8b382-4939-1aad-a314-1885e49b08e7/hadoop.tmp.dir so I do NOT create it in target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc 2024-11-14T19:53:24,066 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f, deleteOnExit=true 2024-11-14T19:53:24,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T19:53:24,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/test.cache.data in system properties and HBase conf 2024-11-14T19:53:24,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T19:53:24,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir in system properties and HBase conf 2024-11-14T19:53:24,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T19:53:24,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T19:53:24,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T19:53:24,067 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T19:53:24,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T19:53:24,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T19:53:24,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T19:53:24,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T19:53:24,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T19:53:24,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T19:53:24,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T19:53:24,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:53:24,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T19:53:24,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/nfs.dump.dir in system properties and HBase conf 2024-11-14T19:53:24,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/java.io.tmpdir in system properties and HBase conf 2024-11-14T19:53:24,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:53:24,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T19:53:24,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T19:53:24,088 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:53:24,302 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:53:24,310 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:53:24,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:53:24,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:53:24,319 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T19:53:24,322 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:53:24,323 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@279773e8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:53:24,323 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2487e01a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:53:24,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ac8078c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/java.io.tmpdir/jetty-localhost-35011-hadoop-hdfs-3_4_1-tests_jar-_-any-8765381464596532632/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:53:24,426 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@44248139{HTTP/1.1, (http/1.1)}{localhost:35011} 2024-11-14T19:53:24,426 INFO [Time-limited test {}] server.Server(415): Started @116472ms 2024-11-14T19:53:24,442 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:53:24,484 INFO [regionserver/867b237d0fa7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:53:24,621 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:53:24,624 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:53:24,625 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:53:24,625 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:53:24,625 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T19:53:24,625 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ba4cdcb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:53:24,626 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3662c58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:53:24,720 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cb3fa36{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/java.io.tmpdir/jetty-localhost-33129-hadoop-hdfs-3_4_1-tests_jar-_-any-10307499277573398040/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:24,720 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17f0fddd{HTTP/1.1, (http/1.1)}{localhost:33129} 2024-11-14T19:53:24,721 INFO [Time-limited test {}] server.Server(415): Started @116766ms 2024-11-14T19:53:24,722 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:53:24,749 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:53:24,752 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:53:24,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:53:24,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:53:24,753 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T19:53:24,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c7a788c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:53:24,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@150f352b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:53:24,774 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:53:24,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:53:24,846 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70c9360c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/java.io.tmpdir/jetty-localhost-32915-hadoop-hdfs-3_4_1-tests_jar-_-any-6374044378390353353/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:24,847 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47b51e28{HTTP/1.1, (http/1.1)}{localhost:32915} 2024-11-14T19:53:24,847 INFO [Time-limited test {}] server.Server(415): Started @116892ms 2024-11-14T19:53:24,848 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:53:25,293 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T19:53:25,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:53:25,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:53:25,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:53:25,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:53:25,356 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data1/current/BP-1048535874-172.17.0.2-1731614004102/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:25,357 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data2/current/BP-1048535874-172.17.0.2-1731614004102/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:25,382 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T19:53:25,385 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x780efa4c1b8ee04a with lease ID 0xce2e299b1493b0ef: Processing first storage report for DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0 from datanode DatanodeRegistration(127.0.0.1:42741, datanodeUuid=20f8daf3-02a9-46b0-92b2-dbcaae5b6dc9, infoPort=33355, infoSecurePort=0, ipcPort=46739, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102) 2024-11-14T19:53:25,385 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x780efa4c1b8ee04a with lease ID 0xce2e299b1493b0ef: from storage DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0 node DatanodeRegistration(127.0.0.1:42741, datanodeUuid=20f8daf3-02a9-46b0-92b2-dbcaae5b6dc9, infoPort=33355, infoSecurePort=0, ipcPort=46739, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:25,385 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x780efa4c1b8ee04a with lease ID 0xce2e299b1493b0ef: Processing first storage report for DS-d1837837-f7d1-48fc-be28-0c0461be21d2 from datanode DatanodeRegistration(127.0.0.1:42741, datanodeUuid=20f8daf3-02a9-46b0-92b2-dbcaae5b6dc9, infoPort=33355, infoSecurePort=0, ipcPort=46739, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102) 2024-11-14T19:53:25,385 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x780efa4c1b8ee04a with lease ID 0xce2e299b1493b0ef: from storage DS-d1837837-f7d1-48fc-be28-0c0461be21d2 node DatanodeRegistration(127.0.0.1:42741, datanodeUuid=20f8daf3-02a9-46b0-92b2-dbcaae5b6dc9, infoPort=33355, infoSecurePort=0, ipcPort=46739, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:25,486 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data3/current/BP-1048535874-172.17.0.2-1731614004102/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:25,486 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data4/current/BP-1048535874-172.17.0.2-1731614004102/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:25,505 WARN [Thread-659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T19:53:25,507 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc3dfab9e678e7515 with lease ID 0xce2e299b1493b0f0: Processing first storage report for DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd from datanode DatanodeRegistration(127.0.0.1:33323, datanodeUuid=adcea9a4-2ef4-4d3a-bca1-bb11d75b183b, infoPort=44249, infoSecurePort=0, ipcPort=38483, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102) 2024-11-14T19:53:25,507 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc3dfab9e678e7515 with lease ID 0xce2e299b1493b0f0: from storage DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd node DatanodeRegistration(127.0.0.1:33323, datanodeUuid=adcea9a4-2ef4-4d3a-bca1-bb11d75b183b, infoPort=44249, infoSecurePort=0, ipcPort=38483, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:25,507 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc3dfab9e678e7515 with lease ID 0xce2e299b1493b0f0: Processing first storage report for DS-c07377dd-703c-4de8-aba4-82533e9b65b7 from datanode DatanodeRegistration(127.0.0.1:33323, datanodeUuid=adcea9a4-2ef4-4d3a-bca1-bb11d75b183b, infoPort=44249, infoSecurePort=0, ipcPort=38483, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102) 2024-11-14T19:53:25,507 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc3dfab9e678e7515 with lease ID 0xce2e299b1493b0f0: from storage DS-c07377dd-703c-4de8-aba4-82533e9b65b7 node DatanodeRegistration(127.0.0.1:33323, datanodeUuid=adcea9a4-2ef4-4d3a-bca1-bb11d75b183b, infoPort=44249, infoSecurePort=0, ipcPort=38483, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:25,580 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc 2024-11-14T19:53:25,582 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/zookeeper_0, clientPort=50637, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T19:53:25,583 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50637 2024-11-14T19:53:25,584 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:25,585 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:25,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:53:25,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42741 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:53:25,597 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1 with version=8 2024-11-14T19:53:25,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/hbase-staging 2024-11-14T19:53:25,599 INFO [Time-limited test {}] client.ConnectionUtils(128): master/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:53:25,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:25,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:25,600 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:53:25,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:25,600 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:53:25,600 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T19:53:25,600 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:53:25,601 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46651 2024-11-14T19:53:25,602 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46651 connecting to ZooKeeper ensemble=127.0.0.1:50637 2024-11-14T19:53:25,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:466510x0, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-14T19:53:25,643 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46651-0x1013c169ef70000 connected 2024-11-14T19:53:25,703 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:25,704 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:25,707 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:53:25,707 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1, hbase.cluster.distributed=false 2024-11-14T19:53:25,709 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:53:25,710 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46651 2024-11-14T19:53:25,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46651 2024-11-14T19:53:25,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46651 2024-11-14T19:53:25,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46651 2024-11-14T19:53:25,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46651 2024-11-14T19:53:25,730 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:53:25,730 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:25,730 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:25,730 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:53:25,730 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:25,730 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:53:25,730 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T19:53:25,731 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:53:25,732 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38355 2024-11-14T19:53:25,734 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38355 connecting to ZooKeeper ensemble=127.0.0.1:50637 2024-11-14T19:53:25,734 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:25,736 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:25,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:383550x0, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:53:25,751 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:53:25,751 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38355-0x1013c169ef70001 connected 2024-11-14T19:53:25,751 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T19:53:25,752 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T19:53:25,753 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T19:53:25,754 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:53:25,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38355 2024-11-14T19:53:25,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38355 2024-11-14T19:53:25,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38355 2024-11-14T19:53:25,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38355 2024-11-14T19:53:25,757 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38355 2024-11-14T19:53:25,774 DEBUG [M:0;867b237d0fa7:46651 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;867b237d0fa7:46651 2024-11-14T19:53:25,774 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/867b237d0fa7,46651,1731614005599 2024-11-14T19:53:25,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:53:25,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:53:25,784 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/867b237d0fa7,46651,1731614005599 2024-11-14T19:53:25,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:25,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T19:53:25,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:25,795 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T19:53:25,795 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/867b237d0fa7,46651,1731614005599 from backup master directory 2024-11-14T19:53:25,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:53:25,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/867b237d0fa7,46651,1731614005599 2024-11-14T19:53:25,802 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T19:53:25,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:53:25,802 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=867b237d0fa7,46651,1731614005599 2024-11-14T19:53:25,807 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/hbase.id] with ID: cbbcad62-88d6-44cd-8810-26bb81fe1a4b 2024-11-14T19:53:25,807 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/.tmp/hbase.id 2024-11-14T19:53:25,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42741 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:53:25,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:53:25,814 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/.tmp/hbase.id]:[hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/hbase.id] 2024-11-14T19:53:25,827 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:25,827 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T19:53:25,829 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-14T19:53:25,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:25,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:25,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42741 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:53:25,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:53:25,845 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T19:53:25,846 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T19:53:25,847 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:53:25,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42741 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:53:25,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:53:25,857 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store 2024-11-14T19:53:25,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42741 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:53:25,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:53:25,865 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:53:25,865 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:53:25,865 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:25,865 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:25,865 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:53:25,865 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:53:25,865 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T19:53:25,865 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731614005865Disabling compacts and flushes for region at 1731614005865Disabling writes for close at 1731614005865Writing region close event to WAL at 1731614005865Closed at 1731614005865 2024-11-14T19:53:25,866 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/.initializing 2024-11-14T19:53:25,866 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/WALs/867b237d0fa7,46651,1731614005599 2024-11-14T19:53:25,869 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C46651%2C1731614005599, suffix=, logDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/WALs/867b237d0fa7,46651,1731614005599, archiveDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/oldWALs, maxLogs=10 2024-11-14T19:53:25,870 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C46651%2C1731614005599.1731614005869 2024-11-14T19:53:25,876 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/WALs/867b237d0fa7,46651,1731614005599/867b237d0fa7%2C46651%2C1731614005599.1731614005869 2024-11-14T19:53:25,879 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33355:33355),(127.0.0.1/127.0.0.1:44249:44249)] 2024-11-14T19:53:25,880 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:53:25,880 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:53:25,880 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:25,880 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:25,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:25,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T19:53:25,884 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:25,884 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:25,884 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:25,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T19:53:25,886 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:25,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:53:25,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:25,888 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T19:53:25,888 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:25,888 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:53:25,888 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:25,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T19:53:25,890 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:25,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:53:25,891 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:25,891 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:25,892 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:25,893 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:25,893 DEBUG [master/867b237d0fa7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:25,894 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T19:53:25,895 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:53:25,898 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:53:25,899 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=816152, jitterRate=0.03779160976409912}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T19:53:25,900 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731614005880Initializing all the Stores at 1731614005882 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614005882Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614005882Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614005882Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614005882Cleaning up temporary data from old regions at 1731614005893 (+11 ms)Region opened successfully at 1731614005900 (+7 ms) 2024-11-14T19:53:25,901 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T19:53:25,905 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2689a2f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:53:25,906 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T19:53:25,906 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T19:53:25,906 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T19:53:25,906 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T19:53:25,907 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T19:53:25,908 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T19:53:25,908 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T19:53:25,910 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T19:53:25,911 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T19:53:25,919 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T19:53:25,919 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T19:53:25,920 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T19:53:25,927 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T19:53:25,928 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T19:53:25,930 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T19:53:25,936 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T19:53:25,937 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T19:53:25,944 DEBUG 
[master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T19:53:25,946 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T19:53:25,952 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T19:53:25,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:53:25,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:53:25,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:25,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:25,962 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=867b237d0fa7,46651,1731614005599, sessionid=0x1013c169ef70000, setting cluster-up flag (Was=false) 2024-11-14T19:53:25,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:25,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:26,002 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T19:53:26,004 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,46651,1731614005599 2024-11-14T19:53:26,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:26,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:26,044 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T19:53:26,045 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,46651,1731614005599 2024-11-14T19:53:26,046 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T19:53:26,048 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T19:53:26,049 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T19:53:26,049 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T19:53:26,049 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 867b237d0fa7,46651,1731614005599 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T19:53:26,050 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:53:26,051 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:53:26,051 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:53:26,051 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:53:26,051 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/867b237d0fa7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T19:53:26,051 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,051 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:53:26,051 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T19:53:26,052 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731614036051 2024-11-14T19:53:26,052 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T19:53:26,052 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T19:53:26,052 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T19:53:26,052 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T19:53:26,052 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T19:53:26,052 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T19:53:26,052 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,053 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T19:53:26,053 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T19:53:26,053 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T19:53:26,053 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:53:26,053 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T19:53:26,053 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T19:53:26,053 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T19:53:26,053 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614006053,5,FailOnTimeoutGroup] 2024-11-14T19:53:26,054 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614006053,5,FailOnTimeoutGroup] 2024-11-14T19:53:26,054 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,054 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T19:53:26,054 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,054 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,054 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:26,054 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T19:53:26,060 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(746): ClusterId : cbbcad62-88d6-44cd-8810-26bb81fe1a4b 2024-11-14T19:53:26,060 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T19:53:26,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42741 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:53:26,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:53:26,062 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T19:53:26,062 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1 2024-11-14T19:53:26,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:53:26,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42741 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:53:26,070 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T19:53:26,070 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T19:53:26,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:53:26,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:53:26,073 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:53:26,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:26,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:26,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:53:26,076 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:53:26,076 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:26,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:26,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:53:26,078 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T19:53:26,078 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:53:26,078 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:26,078 DEBUG [RS:0;867b237d0fa7:38355 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11a36452, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:53:26,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:26,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:53:26,081 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:53:26,081 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:26,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:26,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:53:26,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740 2024-11-14T19:53:26,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740 2024-11-14T19:53:26,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:53:26,085 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:53:26,085 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T19:53:26,086 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:53:26,089 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:53:26,089 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800512, jitterRate=0.017904192209243774}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:53:26,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731614006071Initializing all the Stores at 1731614006072 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614006072Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614006072Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614006072Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614006072Cleaning up temporary data from old regions at 1731614006085 (+13 ms)Region opened successfully at 1731614006090 (+5 ms) 2024-11-14T19:53:26,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:53:26,091 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:53:26,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:53:26,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T19:53:26,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T19:53:26,091 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:53:26,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731614006091Disabling compacts and flushes for region at 1731614006091Disabling writes for close at 1731614006091Writing region 
close event to WAL at 1731614006091Closed at 1731614006091 2024-11-14T19:53:26,092 DEBUG [RS:0;867b237d0fa7:38355 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;867b237d0fa7:38355 2024-11-14T19:53:26,093 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T19:53:26,093 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T19:53:26,093 DEBUG [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T19:53:26,093 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:53:26,093 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T19:53:26,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T19:53:26,093 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(2659): reportForDuty to master=867b237d0fa7,46651,1731614005599 with port=38355, startcode=1731614005730 2024-11-14T19:53:26,094 DEBUG [RS:0;867b237d0fa7:38355 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T19:53:26,095 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:53:26,096 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47295, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T19:53:26,096 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T19:53:26,096 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46651 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 867b237d0fa7,38355,1731614005730 2024-11-14T19:53:26,097 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46651 {}] master.ServerManager(517): Registering regionserver=867b237d0fa7,38355,1731614005730 2024-11-14T19:53:26,098 DEBUG [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1 2024-11-14T19:53:26,098 DEBUG [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35675 2024-11-14T19:53:26,098 DEBUG [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T19:53:26,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:53:26,109 DEBUG [RS:0;867b237d0fa7:38355 {}] 
zookeeper.ZKUtil(111): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/867b237d0fa7,38355,1731614005730 2024-11-14T19:53:26,109 WARN [RS:0;867b237d0fa7:38355 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T19:53:26,109 INFO [RS:0;867b237d0fa7:38355 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:53:26,109 DEBUG [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730 2024-11-14T19:53:26,109 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [867b237d0fa7,38355,1731614005730] 2024-11-14T19:53:26,113 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T19:53:26,115 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T19:53:26,116 INFO [RS:0;867b237d0fa7:38355 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T19:53:26,116 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,116 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T19:53:26,117 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T19:53:26,117 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-14T19:53:26,118 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,118 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,143 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,144 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,144 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,144 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:53:26,144 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,144 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,144 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,144 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,144 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,144 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,144 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:53:26,144 DEBUG [RS:0;867b237d0fa7:38355 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:53:26,145 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,145 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,145 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,145 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-14T19:53:26,145 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,145 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,38355,1731614005730-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:53:26,159 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T19:53:26,159 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,38355,1731614005730-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,159 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,159 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.Replication(171): 867b237d0fa7,38355,1731614005730 started 2024-11-14T19:53:26,173 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,173 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(1482): Serving as 867b237d0fa7,38355,1731614005730, RpcServer on 867b237d0fa7/172.17.0.2:38355, sessionid=0x1013c169ef70001 2024-11-14T19:53:26,173 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T19:53:26,173 DEBUG [RS:0;867b237d0fa7:38355 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 867b237d0fa7,38355,1731614005730 2024-11-14T19:53:26,173 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,38355,1731614005730' 2024-11-14T19:53:26,173 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T19:53:26,174 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T19:53:26,174 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T19:53:26,174 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T19:53:26,174 DEBUG [RS:0;867b237d0fa7:38355 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 867b237d0fa7,38355,1731614005730 2024-11-14T19:53:26,174 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,38355,1731614005730' 2024-11-14T19:53:26,174 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T19:53:26,175 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T19:53:26,175 DEBUG [RS:0;867b237d0fa7:38355 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T19:53:26,175 INFO [RS:0;867b237d0fa7:38355 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T19:53:26,175 INFO [RS:0;867b237d0fa7:38355 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-14T19:53:26,246 WARN [867b237d0fa7:46651 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T19:53:26,278 INFO [RS:0;867b237d0fa7:38355 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C38355%2C1731614005730, suffix=, logDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730, archiveDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/oldWALs, maxLogs=32 2024-11-14T19:53:26,278 INFO [RS:0;867b237d0fa7:38355 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C38355%2C1731614005730.1731614006278 2024-11-14T19:53:26,288 INFO [RS:0;867b237d0fa7:38355 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 2024-11-14T19:53:26,290 DEBUG [RS:0;867b237d0fa7:38355 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33355:33355),(127.0.0.1/127.0.0.1:44249:44249)] 2024-11-14T19:53:26,497 DEBUG [867b237d0fa7:46651 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T19:53:26,499 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=867b237d0fa7,38355,1731614005730 2024-11-14T19:53:26,503 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,38355,1731614005730, state=OPENING 2024-11-14T19:53:26,534 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T19:53:26,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:26,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:53:26,545 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:53:26,545 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:53:26,545 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:53:26,545 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=867b237d0fa7,38355,1731614005730}] 2024-11-14T19:53:26,701 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T19:53:26,703 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47319, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T19:53:26,707 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T19:53:26,708 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:53:26,710 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C38355%2C1731614005730.meta, suffix=.meta, logDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730, archiveDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/oldWALs, maxLogs=32 2024-11-14T19:53:26,711 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta 2024-11-14T19:53:26,717 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta 2024-11-14T19:53:26,718 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33355:33355),(127.0.0.1/127.0.0.1:44249:44249)] 2024-11-14T19:53:26,718 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:53:26,719 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T19:53:26,719 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T19:53:26,719 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-14T19:53:26,719 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T19:53:26,719 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:53:26,719 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T19:53:26,719 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T19:53:26,723 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:53:26,724 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:53:26,724 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:26,725 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:26,725 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:53:26,726 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:53:26,726 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:26,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:26,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:53:26,727 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:53:26,727 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:26,728 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:53:26,728 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:53:26,729 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:53:26,729 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:26,729 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T19:53:26,730 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:53:26,731 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740 2024-11-14T19:53:26,732 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740 2024-11-14T19:53:26,733 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:53:26,733 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:53:26,734 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T19:53:26,736 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:53:26,736 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880828, jitterRate=0.12003099918365479}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:53:26,737 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T19:53:26,737 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731614006719Writing region info on filesystem at 1731614006719Initializing all the Stores at 1731614006721 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614006721Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614006722 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614006722Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614006722Cleaning up temporary data from old regions at 1731614006733 (+11 ms)Running coprocessor post-open hooks at 1731614006737 (+4 ms)Region opened successfully at 1731614006737 2024-11-14T19:53:26,739 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731614006701 2024-11-14T19:53:26,742 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T19:53:26,742 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T19:53:26,743 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=867b237d0fa7,38355,1731614005730 2024-11-14T19:53:26,744 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,38355,1731614005730, state=OPEN 2024-11-14T19:53:26,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:53:26,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:53:26,775 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=867b237d0fa7,38355,1731614005730 2024-11-14T19:53:26,775 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:53:26,775 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:53:26,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T19:53:26,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=867b237d0fa7,38355,1731614005730 in 230 msec 2024-11-14T19:53:26,781 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T19:53:26,781 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 685 msec 2024-11-14T19:53:26,782 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:53:26,782 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T19:53:26,784 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:53:26,784 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,38355,1731614005730, seqNum=-1] 2024-11-14T19:53:26,784 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:53:26,786 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60685, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:53:26,793 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 743 msec 2024-11-14T19:53:26,793 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731614006793, completionTime=-1 2024-11-14T19:53:26,793 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T19:53:26,793 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T19:53:26,795 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T19:53:26,795 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731614066795 2024-11-14T19:53:26,795 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731614126795 2024-11-14T19:53:26,795 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-14T19:53:26,796 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,46651,1731614005599-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,796 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,46651,1731614005599-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,796 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,46651,1731614005599-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,796 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-867b237d0fa7:46651, period=300000, unit=MILLISECONDS is enabled. 
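The InitMetaProcedure entry above ("Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces") is the point where the two built-in namespaces appear. As a purely illustrative check that is not part of this test, the standard HBase client Admin API can list them once the minicluster is up (assumes an hbase-site.xml or ZooKeeper quorum pointing at this cluster; class and variable names are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespaces {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml / ZK quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName()); // expect at least "default" and "hbase"
          }
        }
      }
    }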
2024-11-14T19:53:26,796 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,796 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,798 DEBUG [master/867b237d0fa7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T19:53:26,801 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.999sec 2024-11-14T19:53:26,801 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T19:53:26,801 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T19:53:26,801 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T19:53:26,801 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T19:53:26,801 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T19:53:26,801 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,46651,1731614005599-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:53:26,801 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,46651,1731614005599-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T19:53:26,804 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T19:53:26,804 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T19:53:26,804 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,46651,1731614005599-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
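Each ChoreService entry above boils down to a named task run at a fixed period (for example BalancerChore every 300000 ms). HBase uses its own ChoreService/ScheduledChore classes for this; the plain-JDK sketch below is only an analogy for that period/unit scheduling, not HBase code:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogy {
      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // Comparable to "period=300000, unit=MILLISECONDS" for the BalancerChore above.
        pool.scheduleAtFixedRate(() -> System.out.println("balance check"),
            0, 300_000, TimeUnit.MILLISECONDS);
      }
    }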
2024-11-14T19:53:26,861 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f90614, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:53:26,861 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 867b237d0fa7,46651,-1 for getting cluster id 2024-11-14T19:53:26,861 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T19:53:26,863 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cbbcad62-88d6-44cd-8810-26bb81fe1a4b' 2024-11-14T19:53:26,863 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T19:53:26,863 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cbbcad62-88d6-44cd-8810-26bb81fe1a4b" 2024-11-14T19:53:26,863 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71c7f46f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:53:26,864 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [867b237d0fa7,46651,-1] 2024-11-14T19:53:26,864 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T19:53:26,864 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:53:26,866 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46818, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T19:53:26,867 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b7a2925, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:53:26,867 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:53:26,868 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,38355,1731614005730, seqNum=-1] 2024-11-14T19:53:26,868 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:53:26,870 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51670, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:53:26,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=867b237d0fa7,46651,1731614005599 2024-11-14T19:53:26,872 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:26,874 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T19:53:26,889 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:53:26,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:26,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:26,889 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:53:26,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:53:26,889 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:53:26,889 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T19:53:26,889 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:53:26,890 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37273 2024-11-14T19:53:26,891 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37273 connecting to ZooKeeper ensemble=127.0.0.1:50637 2024-11-14T19:53:26,892 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:26,893 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:53:26,909 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:372730x0, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:53:26,909 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-14T19:53:26,909 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:372730x0, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-14T19:53:26,909 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37273-0x1013c169ef70002 connected 2024-11-14T19:53:26,910 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T19:53:26,910 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T19:53:26,911 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:37273-0x1013c169ef70002, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T19:53:26,912 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37273-0x1013c169ef70002, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:53:26,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37273 2024-11-14T19:53:26,914 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37273 2024-11-14T19:53:26,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37273 2024-11-14T19:53:26,918 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37273 2024-11-14T19:53:26,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37273 2024-11-14T19:53:26,922 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.HRegionServer(746): ClusterId : cbbcad62-88d6-44cd-8810-26bb81fe1a4b 2024-11-14T19:53:26,922 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T19:53:26,928 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T19:53:26,928 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T19:53:26,937 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T19:53:26,937 DEBUG [RS:1;867b237d0fa7:37273 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a0227c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:53:26,948 DEBUG [RS:1;867b237d0fa7:37273 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;867b237d0fa7:37273 2024-11-14T19:53:26,948 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T19:53:26,948 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T19:53:26,948 DEBUG [RS:1;867b237d0fa7:37273 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T19:53:26,949 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.HRegionServer(2659): reportForDuty to master=867b237d0fa7,46651,1731614005599 with port=37273, startcode=1731614006888 2024-11-14T19:53:26,949 DEBUG [RS:1;867b237d0fa7:37273 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T19:53:26,951 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55807, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T19:53:26,951 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46651 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 867b237d0fa7,37273,1731614006888 2024-11-14T19:53:26,952 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46651 {}] master.ServerManager(517): Registering regionserver=867b237d0fa7,37273,1731614006888 2024-11-14T19:53:26,953 DEBUG [RS:1;867b237d0fa7:37273 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1 2024-11-14T19:53:26,953 DEBUG [RS:1;867b237d0fa7:37273 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35675 2024-11-14T19:53:26,953 DEBUG [RS:1;867b237d0fa7:37273 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T19:53:26,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:53:26,961 DEBUG [RS:1;867b237d0fa7:37273 {}] zookeeper.ZKUtil(111): regionserver:37273-0x1013c169ef70002, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/867b237d0fa7,37273,1731614006888 2024-11-14T19:53:26,961 WARN [RS:1;867b237d0fa7:37273 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T19:53:26,961 INFO [RS:1;867b237d0fa7:37273 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:53:26,961 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [867b237d0fa7,37273,1731614006888] 2024-11-14T19:53:26,962 DEBUG [RS:1;867b237d0fa7:37273 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888 2024-11-14T19:53:26,965 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T19:53:26,967 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T19:53:26,967 INFO [RS:1;867b237d0fa7:37273 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T19:53:26,967 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
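The RegionServerTracker entry above shows the new region server announcing itself as an ephemeral child of /hbase/rs. A hypothetical way to see the currently registered servers is to list that znode's children with a plain ZooKeeper client; the quorum address and base znode below are copied from the ZKWatcher/ZKUtil entries in this log, everything else is illustrative:

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServers {
      public static void main(String[] args) throws Exception {
        // quorum and base znode as reported by the ZKWatcher entries above
        ZooKeeper zk = new ZooKeeper("127.0.0.1:50637", 30_000, event -> { });
        List<String> servers = zk.getChildren("/hbase/rs", false);
        servers.forEach(System.out::println); // e.g. 867b237d0fa7,37273,1731614006888
        zk.close();
      }
    }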
2024-11-14T19:53:26,967 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T19:53:26,968 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T19:53:26,968 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,968 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,968 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,968 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,969 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,969 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,969 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:53:26,969 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,969 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,969 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,969 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,969 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,969 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:53:26,969 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:53:26,969 DEBUG [RS:1;867b237d0fa7:37273 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:53:26,969 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T19:53:26,969 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,970 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,970 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,970 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,970 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,37273,1731614006888-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:53:26,983 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T19:53:26,983 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,37273,1731614006888-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,983 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,983 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.Replication(171): 867b237d0fa7,37273,1731614006888 started 2024-11-14T19:53:26,997 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:53:26,997 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.HRegionServer(1482): Serving as 867b237d0fa7,37273,1731614006888, RpcServer on 867b237d0fa7/172.17.0.2:37273, sessionid=0x1013c169ef70002 2024-11-14T19:53:26,997 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T19:53:26,997 DEBUG [RS:1;867b237d0fa7:37273 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 867b237d0fa7,37273,1731614006888 2024-11-14T19:53:26,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;867b237d0fa7:37273,5,FailOnTimeoutGroup] 2024-11-14T19:53:26,997 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,37273,1731614006888' 2024-11-14T19:53:26,997 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T19:53:26,997 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-14T19:53:26,998 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T19:53:26,998 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T19:53:26,998 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T19:53:26,998 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T19:53:26,998 DEBUG [RS:1;867b237d0fa7:37273 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
867b237d0fa7,37273,1731614006888 2024-11-14T19:53:26,998 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,37273,1731614006888' 2024-11-14T19:53:26,998 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T19:53:26,998 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T19:53:26,999 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 867b237d0fa7,46651,1731614005599 2024-11-14T19:53:26,999 DEBUG [RS:1;867b237d0fa7:37273 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T19:53:26,999 INFO [RS:1;867b237d0fa7:37273 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T19:53:26,999 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@678746b5 2024-11-14T19:53:26,999 INFO [RS:1;867b237d0fa7:37273 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T19:53:26,999 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T19:53:27,001 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46822, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T19:53:27,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46651 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T19:53:27,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46651 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
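The two TableDescriptorChecker warnings above fire because the test deliberately shrinks the region limits so that flushes and splits happen quickly. A minimal sketch of how such values could be set on a Configuration, using the exact property names and values from the warnings (the surrounding class is hypothetical, not taken from the test source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyRegionConf {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Both values are far below the production defaults and exist only to
        // force frequent flushing and splitting during the test.
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB
        return conf;
      }
    }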
2024-11-14T19:53:27,001 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46651 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T19:53:27,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46651 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T19:53:27,004 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T19:53:27,005 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:27,005 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46651 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-14T19:53:27,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46651 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T19:53:27,006 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T19:53:27,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741835_1011 (size=393) 2024-11-14T19:53:27,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42741 is added to blk_1073741835_1011 (size=393) 2024-11-14T19:53:27,015 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2bf12d8ce7d3607de699a010d06fabfe, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1 2024-11-14T19:53:27,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33323 is added to blk_1073741836_1012 (size=76) 2024-11-14T19:53:27,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42741 is added to blk_1073741836_1012 (size=76) 2024-11-14T19:53:27,023 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:53:27,023 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 2bf12d8ce7d3607de699a010d06fabfe, disabling compactions & flushes 2024-11-14T19:53:27,023 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe. 2024-11-14T19:53:27,023 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe. 2024-11-14T19:53:27,023 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe. after waiting 0 ms 2024-11-14T19:53:27,023 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe. 2024-11-14T19:53:27,024 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe. 2024-11-14T19:53:27,024 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2bf12d8ce7d3607de699a010d06fabfe: Waiting for close lock at 1731614007023Disabling compacts and flushes for region at 1731614007023Disabling writes for close at 1731614007023Writing region close event to WAL at 1731614007024 (+1 ms)Closed at 1731614007024 2024-11-14T19:53:27,026 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T19:53:27,026 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731614007026"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731614007026"}]},"ts":"1731614007026"} 2024-11-14T19:53:27,029 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
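The create-table entries above describe a single-family table ('info', one version, ROW bloom filter, 64 KB blocks). Below is a hypothetical client-side equivalent of that schema using the standard HBase 2/3 builder API; the class and method names are illustrative, while the settings mirror the descriptor printed by HMaster$4(2454):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      static void create(Admin admin) throws IOException {
        admin.createTable(TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)                 // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
                .build())
            .build());
      }
    }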
2024-11-14T19:53:27,030 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T19:53:27,031 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731614007031"}]},"ts":"1731614007031"} 2024-11-14T19:53:27,034 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-14T19:53:27,034 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2bf12d8ce7d3607de699a010d06fabfe, ASSIGN}] 2024-11-14T19:53:27,036 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2bf12d8ce7d3607de699a010d06fabfe, ASSIGN 2024-11-14T19:53:27,037 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2bf12d8ce7d3607de699a010d06fabfe, ASSIGN; state=OFFLINE, location=867b237d0fa7,38355,1731614005730; forceNewPlan=false, retain=false 2024-11-14T19:53:27,102 INFO [RS:1;867b237d0fa7:37273 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C37273%2C1731614006888, suffix=, logDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888, archiveDir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/oldWALs, maxLogs=32 2024-11-14T19:53:27,102 INFO [RS:1;867b237d0fa7:37273 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C37273%2C1731614006888.1731614007102 2024-11-14T19:53:27,110 INFO [RS:1;867b237d0fa7:37273 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 2024-11-14T19:53:27,113 DEBUG [RS:1;867b237d0fa7:37273 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44249:44249),(127.0.0.1/127.0.0.1:33355:33355)] 2024-11-14T19:53:27,189 INFO [867b237d0fa7:46651 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
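The AbstractFSWAL entry above shows the new region server's WAL being created with blocksize=256 MB, rollsize=128 MB and maxLogs=32; the test name (TestLogRolling) is about forcing such rolls. A roll can also be requested explicitly through the Admin API, as in the illustrative sketch below, where only the "host,port,startcode" string is copied from the log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;

    public class ForceWalRoll {
      static void roll(Admin admin) throws IOException {
        // server name format "host,port,startcode" as it appears in the log above
        admin.rollWALWriter(ServerName.valueOf("867b237d0fa7,37273,1731614006888"));
      }
    }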
2024-11-14T19:53:27,189 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2bf12d8ce7d3607de699a010d06fabfe, regionState=OPENING, regionLocation=867b237d0fa7,38355,1731614005730 2024-11-14T19:53:27,194 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2bf12d8ce7d3607de699a010d06fabfe, ASSIGN because future has completed 2024-11-14T19:53:27,195 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2bf12d8ce7d3607de699a010d06fabfe, server=867b237d0fa7,38355,1731614005730}] 2024-11-14T19:53:27,355 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe. 2024-11-14T19:53:27,356 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 2bf12d8ce7d3607de699a010d06fabfe, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:53:27,357 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:27,357 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:53:27,357 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:27,357 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:27,359 INFO [StoreOpener-2bf12d8ce7d3607de699a010d06fabfe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:27,361 INFO [StoreOpener-2bf12d8ce7d3607de699a010d06fabfe-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2bf12d8ce7d3607de699a010d06fabfe columnFamilyName info 2024-11-14T19:53:27,362 DEBUG [StoreOpener-2bf12d8ce7d3607de699a010d06fabfe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:53:27,362 INFO [StoreOpener-2bf12d8ce7d3607de699a010d06fabfe-1 {}] regionserver.HStore(327): Store=2bf12d8ce7d3607de699a010d06fabfe/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:53:27,363 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:27,364 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:27,364 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:27,365 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:27,365 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:27,368 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:27,370 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:53:27,371 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 2bf12d8ce7d3607de699a010d06fabfe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=842246, jitterRate=0.07097193598747253}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T19:53:27,371 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:27,371 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 2bf12d8ce7d3607de699a010d06fabfe: Running coprocessor pre-open hook at 1731614007357Writing region info on filesystem at 1731614007357Initializing all the Stores at 1731614007359 (+2 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614007359Cleaning up temporary data from old regions at 1731614007365 (+6 ms)Running coprocessor post-open hooks at 1731614007371 (+6 ms)Region opened successfully at 1731614007371 2024-11-14T19:53:27,373 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe., pid=6, masterSystemTime=1731614007350 2024-11-14T19:53:27,375 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe. 2024-11-14T19:53:27,375 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe. 2024-11-14T19:53:27,376 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2bf12d8ce7d3607de699a010d06fabfe, regionState=OPEN, openSeqNum=2, regionLocation=867b237d0fa7,38355,1731614005730 2024-11-14T19:53:27,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2bf12d8ce7d3607de699a010d06fabfe, server=867b237d0fa7,38355,1731614005730 because future has completed 2024-11-14T19:53:27,383 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T19:53:27,383 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 2bf12d8ce7d3607de699a010d06fabfe, server=867b237d0fa7,38355,1731614005730 in 186 msec 2024-11-14T19:53:27,386 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T19:53:27,386 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2bf12d8ce7d3607de699a010d06fabfe, ASSIGN in 349 msec 2024-11-14T19:53:27,387 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T19:53:27,387 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731614007387"}]},"ts":"1731614007387"} 2024-11-14T19:53:27,390 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-14T19:53:27,391 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T19:53:27,394 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 390 msec 2024-11-14T19:53:30,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T19:53:30,669 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T19:53:30,671 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T19:53:30,672 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-14T19:53:30,674 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:53:30,674 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T19:53:30,675 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T19:53:30,675 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T19:53:32,226 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T19:53:32,233 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:53:32,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:53:32,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:53:32,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:53:32,266 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-14T19:53:37,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46651 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T19:53:37,098 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-14T19:53:37,098 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-14T19:53:37,102 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T19:53:37,102 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe. 2024-11-14T19:53:37,116 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:53:37,121 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:53:37,122 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:53:37,122 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:53:37,122 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:53:37,123 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12380f44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:53:37,123 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4208698f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:53:37,221 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ebac93e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/java.io.tmpdir/jetty-localhost-35353-hadoop-hdfs-3_4_1-tests_jar-_-any-11304175157683246817/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:37,222 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@24e1907f{HTTP/1.1, (http/1.1)}{localhost:35353} 2024-11-14T19:53:37,222 INFO [Time-limited test {}] server.Server(415): Started @129267ms 2024-11-14T19:53:37,223 WARN [Time-limited test {}] 
web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:53:37,257 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:53:37,262 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:53:37,263 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:53:37,263 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:53:37,263 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T19:53:37,264 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5974478d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:53:37,264 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ba3fb64{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:53:37,361 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@17c4d1ac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/java.io.tmpdir/jetty-localhost-33965-hadoop-hdfs-3_4_1-tests_jar-_-any-8871471680682135780/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:37,362 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b41cb16{HTTP/1.1, (http/1.1)}{localhost:33965} 2024-11-14T19:53:37,362 INFO [Time-limited test {}] server.Server(415): Started @129408ms 2024-11-14T19:53:37,363 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:53:37,391 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:53:37,394 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:53:37,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:53:37,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:53:37,395 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:53:37,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d396542{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:53:37,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c34cffb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:53:37,489 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b23fe4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/java.io.tmpdir/jetty-localhost-43747-hadoop-hdfs-3_4_1-tests_jar-_-any-16331941044333063571/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:37,489 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9de090{HTTP/1.1, (http/1.1)}{localhost:43747} 2024-11-14T19:53:37,489 INFO [Time-limited test {}] server.Server(415): Started @129535ms 2024-11-14T19:53:37,491 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
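The AuthenticationFilter warning above (repeated for each embedded Jetty datanode web server) means the HTTP signature secret file could not be read, so each process falls back to a random secret; for a throwaway mini-cluster that is harmless. As a minimal sketch only, assuming a Hadoop Configuration is being assembled for these daemons (the helper class name is illustrative and the path is simply the one quoted in the warning), the filter can be pointed at a readable secret file via the standard property:

import org.apache.hadoop.conf.Configuration;

public class HttpAuthSecretSketch {
  // Hypothetical helper: point the Hadoop HTTP authentication filter at a
  // readable signature secret file so it stops falling back to random secrets.
  public static Configuration withSignatureSecretFile() {
    Configuration conf = new Configuration();
    conf.set("hadoop.http.authentication.signature.secret.file",
        "/home/jenkins/hadoop-http-auth-signature-secret");
    return conf;
  }
}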
2024-11-14T19:53:38,101 WARN [Thread-869 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data6/current/BP-1048535874-172.17.0.2-1731614004102/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:38,102 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data5/current/BP-1048535874-172.17.0.2-1731614004102/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:38,124 WARN [Thread-875 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data7/current/BP-1048535874-172.17.0.2-1731614004102/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:38,126 WARN [Thread-877 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data8/current/BP-1048535874-172.17.0.2-1731614004102/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:38,127 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T19:53:38,134 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6636468d74f0afd4 with lease ID 0xce2e299b1493b0f1: Processing first storage report for DS-f7c03f61-fab7-49db-a217-682cac712c93 from datanode DatanodeRegistration(127.0.0.1:44027, datanodeUuid=38fb90e0-8bf6-45cd-bad5-f472a065f760, infoPort=41161, infoSecurePort=0, ipcPort=42903, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102) 2024-11-14T19:53:38,134 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6636468d74f0afd4 with lease ID 0xce2e299b1493b0f1: from storage DS-f7c03f61-fab7-49db-a217-682cac712c93 node DatanodeRegistration(127.0.0.1:44027, datanodeUuid=38fb90e0-8bf6-45cd-bad5-f472a065f760, infoPort=41161, infoSecurePort=0, ipcPort=42903, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:38,134 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6636468d74f0afd4 with lease ID 0xce2e299b1493b0f1: Processing first storage report for DS-97c62ea2-4fa5-4bbe-95b1-0c2abc1d7218 from datanode DatanodeRegistration(127.0.0.1:44027, datanodeUuid=38fb90e0-8bf6-45cd-bad5-f472a065f760, infoPort=41161, infoSecurePort=0, ipcPort=42903, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102) 2024-11-14T19:53:38,134 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6636468d74f0afd4 with lease ID 0xce2e299b1493b0f1: from storage DS-97c62ea2-4fa5-4bbe-95b1-0c2abc1d7218 node DatanodeRegistration(127.0.0.1:44027, datanodeUuid=38fb90e0-8bf6-45cd-bad5-f472a065f760, infoPort=41161, infoSecurePort=0, 
ipcPort=42903, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:38,156 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T19:53:38,158 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b116077d3819ca2 with lease ID 0xce2e299b1493b0f2: Processing first storage report for DS-dd69739c-8186-4404-b0a6-491b63108c4d from datanode DatanodeRegistration(127.0.0.1:40727, datanodeUuid=eb4020f0-9a38-43eb-9857-b97f4c70551e, infoPort=46435, infoSecurePort=0, ipcPort=39637, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102) 2024-11-14T19:53:38,159 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b116077d3819ca2 with lease ID 0xce2e299b1493b0f2: from storage DS-dd69739c-8186-4404-b0a6-491b63108c4d node DatanodeRegistration(127.0.0.1:40727, datanodeUuid=eb4020f0-9a38-43eb-9857-b97f4c70551e, infoPort=46435, infoSecurePort=0, ipcPort=39637, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:38,159 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b116077d3819ca2 with lease ID 0xce2e299b1493b0f2: Processing first storage report for DS-ccc22ee6-55ce-465d-a37a-ff325c659d78 from datanode DatanodeRegistration(127.0.0.1:40727, datanodeUuid=eb4020f0-9a38-43eb-9857-b97f4c70551e, infoPort=46435, infoSecurePort=0, ipcPort=39637, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102) 2024-11-14T19:53:38,159 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b116077d3819ca2 with lease ID 0xce2e299b1493b0f2: from storage DS-ccc22ee6-55ce-465d-a37a-ff325c659d78 node DatanodeRegistration(127.0.0.1:40727, datanodeUuid=eb4020f0-9a38-43eb-9857-b97f4c70551e, infoPort=46435, infoSecurePort=0, ipcPort=39637, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:38,208 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10/current/BP-1048535874-172.17.0.2-1731614004102/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:38,208 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9/current/BP-1048535874-172.17.0.2-1731614004102/current, will proceed with Du for space computation calculation, 2024-11-14T19:53:38,228 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T19:53:38,230 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdc48d0ab48509cb4 with lease ID 0xce2e299b1493b0f3: Processing first storage report for DS-2f5ee03e-a805-447d-84af-222352ad5577 from datanode DatanodeRegistration(127.0.0.1:38091, datanodeUuid=d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb, infoPort=39365, infoSecurePort=0, ipcPort=33099, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102) 2024-11-14T19:53:38,230 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdc48d0ab48509cb4 with lease ID 0xce2e299b1493b0f3: from storage DS-2f5ee03e-a805-447d-84af-222352ad5577 node DatanodeRegistration(127.0.0.1:38091, datanodeUuid=d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb, infoPort=39365, infoSecurePort=0, ipcPort=33099, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T19:53:38,230 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdc48d0ab48509cb4 with lease ID 0xce2e299b1493b0f3: Processing first storage report for DS-41aef3fe-177d-4328-a2aa-f4325881a6ad from datanode DatanodeRegistration(127.0.0.1:38091, datanodeUuid=d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb, infoPort=39365, infoSecurePort=0, ipcPort=33099, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102) 2024-11-14T19:53:38,231 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdc48d0ab48509cb4 with lease ID 0xce2e299b1493b0f3: from storage DS-41aef3fe-177d-4328-a2aa-f4325881a6ad node DatanodeRegistration(127.0.0.1:38091, datanodeUuid=d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb, infoPort=39365, infoSecurePort=0, ipcPort=33099, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:38,316 WARN [ResponseProcessor for block BP-1048535874-172.17.0.2-1731614004102:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1048535874-172.17.0.2-1731614004102:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:38,316 WARN [ResponseProcessor for block BP-1048535874-172.17.0.2-1731614004102:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1048535874-172.17.0.2-1731614004102:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1048535874-172.17.0.2-1731614004102:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
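The DirectoryScanner warnings above report dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to a value above 1000 ms/sec, so the datanodes fall back to -1 and scan unthrottled. A minimal sketch, assuming the mini-cluster's Configuration is where the key is set (the helper name and the 1000 ms value are illustrative; the property itself is the one named in the warning):

import org.apache.hadoop.conf.Configuration;

public class DirectoryScanThrottleSketch {
  // Hypothetical helper: keep the throttle within the range the DataNode
  // accepts; out-of-range values are ignored, as the warnings above show.
  public static void applyThrottle(Configuration conf) {
    conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1000);
  }
}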
2024-11-14T19:53:38,316 WARN [ResponseProcessor for block BP-1048535874-172.17.0.2-1731614004102:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1048535874-172.17.0.2-1731614004102:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1048535874-172.17.0.2-1731614004102:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:38,316 WARN [DataStreamer for file /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 block BP-1048535874-172.17.0.2-1731614004102:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK], DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]) is bad. 2024-11-14T19:53:38,316 WARN [DataStreamer for file /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta block BP-1048535874-172.17.0.2-1731614004102:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]) is bad. 2024-11-14T19:53:38,316 WARN [DataStreamer for file /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/WALs/867b237d0fa7,46651,1731614005599/867b237d0fa7%2C46651%2C1731614005599.1731614005869 block BP-1048535874-172.17.0.2-1731614004102:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]) is bad. 2024-11-14T19:53:38,316 WARN [ResponseProcessor for block BP-1048535874-172.17.0.2-1731614004102:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1048535874-172.17.0.2-1731614004102:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1048535874-172.17.0.2-1731614004102:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:53:38,316 WARN [DataStreamer for file /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 block BP-1048535874-172.17.0.2-1731614004102:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]) is bad. 2024-11-14T19:53:38,317 WARN [PacketResponder: BP-1048535874-172.17.0.2-1731614004102:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33323] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:38,318 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70c9360c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:38,318 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636181918_22 at /127.0.0.1:50020 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:33323:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50020 dst: /127.0.0.1:33323 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:38,319 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47b51e28{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:53:38,319 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:53:38,317 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:32946 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32946 dst: /127.0.0.1:42741 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:38,319 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_636181918_22 at /127.0.0.1:32980 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:42741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32980 dst: /127.0.0.1:42741 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:53:38,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@150f352b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:53:38,318 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:49994 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33323:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49994 dst: /127.0.0.1:33323 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:38,319 WARN [PacketResponder: BP-1048535874-172.17.0.2-1731614004102:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33323] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] 
at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:38,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c7a788c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,STOPPED} 2024-11-14T19:53:38,319 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:32938 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32938 dst: /127.0.0.1:42741 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:38,319 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:49980 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33323:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49980 dst: /127.0.0.1:33323 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:38,319 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_177392807_22 at /127.0.0.1:32910 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32910 dst: /127.0.0.1:42741 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:38,320 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_177392807_22 at /127.0.0.1:49944 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33323:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49944 dst: /127.0.0.1:33323 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:53:38,323 WARN [BP-1048535874-172.17.0.2-1731614004102 heartbeating to localhost/127.0.0.1:35675 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:53:38,323 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T19:53:38,323 WARN [BP-1048535874-172.17.0.2-1731614004102 heartbeating to localhost/127.0.0.1:35675 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1048535874-172.17.0.2-1731614004102 (Datanode Uuid adcea9a4-2ef4-4d3a-bca1-bb11d75b183b) service to localhost/127.0.0.1:35675 2024-11-14T19:53:38,323 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:53:38,324 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data3/current/BP-1048535874-172.17.0.2-1731614004102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:38,324 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data4/current/BP-1048535874-172.17.0.2-1731614004102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:38,325 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:53:38,325 WARN [DataStreamer for file /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 block BP-1048535874-172.17.0.2-1731614004102:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:38,331 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@4a14e1a2 {}] datanode.DataXceiver(331): 127.0.0.1:42741:DataXceiver error processing unknown operation src: /127.0.0.1:40660 dst: /127.0.0.1:42741 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:38,332 WARN [DataStreamer for file /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/WALs/867b237d0fa7,46651,1731614005599/867b237d0fa7%2C46651%2C1731614005599.1731614005869 block BP-1048535874-172.17.0.2-1731614004102:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:38,332 WARN [DataStreamer for file /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta block BP-1048535874-172.17.0.2-1731614004102:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:38,332 WARN [DataStreamer for file /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 block BP-1048535874-172.17.0.2-1731614004102:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:53:38,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cb3fa36{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:38,336 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17f0fddd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:53:38,336 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:53:38,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3662c58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:53:38,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ba4cdcb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,STOPPED} 2024-11-14T19:53:38,337 WARN [BP-1048535874-172.17.0.2-1731614004102 heartbeating to localhost/127.0.0.1:35675 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:53:38,337 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T19:53:38,338 WARN [BP-1048535874-172.17.0.2-1731614004102 heartbeating to localhost/127.0.0.1:35675 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1048535874-172.17.0.2-1731614004102 (Datanode Uuid 20f8daf3-02a9-46b0-92b2-dbcaae5b6dc9) service to localhost/127.0.0.1:35675 2024-11-14T19:53:38,338 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:53:38,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data1/current/BP-1048535874-172.17.0.2-1731614004102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:38,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data2/current/BP-1048535874-172.17.0.2-1731614004102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:38,339 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:53:38,344 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe., hostname=867b237d0fa7,38355,1731614005730, seqNum=2] 2024-11-14T19:53:38,345 ERROR [FSHLog-0-hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1-prefix:867b237d0fa7,38355,1731614005730 {}] wal.AbstractFSWAL(1838): appendAndSync throws 
IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:38,346 WARN [FSHLog-0-hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1-prefix:867b237d0fa7,38355,1731614005730 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:38,346 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C38355%2C1731614005730:(num 1731614006278) roll requested 2024-11-14T19:53:38,346 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C38355%2C1731614005730.1731614018346 2024-11-14T19:53:38,355 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:38,355 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:38,355 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:38,355 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:38,355 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:38,355 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614018346 2024-11-14T19:53:38,356 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:38,356 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:38,357 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-14T19:53:38,357 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-14T19:53:38,357 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 2024-11-14T19:53:38,359 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46435:46435),(127.0.0.1/127.0.0.1:41161:41161)] 2024-11-14T19:53:38,359 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 is not closed yet, will try archiving it next time 2024-11-14T19:53:38,360 WARN [IPC Server handler 3 on default port 35675 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-14T19:53:38,363 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 after 5ms 2024-11-14T19:53:38,970 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:39,848 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:40,359 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:40,360 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614018346 2024-11-14T19:53:40,361 WARN [ResponseProcessor for block BP-1048535874-172.17.0.2-1731614004102:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1048535874-172.17.0.2-1731614004102:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:53:40,361 WARN [DataStreamer for file /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614018346 block BP-1048535874-172.17.0.2-1731614004102:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK], DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]) is bad. 2024-11-14T19:53:40,362 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:41024 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:40727:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41024 dst: /127.0.0.1:40727 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:53:40,362 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:33742 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:44027:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33742 dst: /127.0.0.1:44027 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:40,366 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@17c4d1ac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:40,366 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b41cb16{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:53:40,366 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:53:40,366 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ba3fb64{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:53:40,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5974478d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,STOPPED} 2024-11-14T19:53:40,368 WARN [BP-1048535874-172.17.0.2-1731614004102 heartbeating to localhost/127.0.0.1:35675 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:53:40,368 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T19:53:40,368 WARN [BP-1048535874-172.17.0.2-1731614004102 heartbeating to localhost/127.0.0.1:35675 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1048535874-172.17.0.2-1731614004102 (Datanode Uuid eb4020f0-9a38-43eb-9857-b97f4c70551e) service to localhost/127.0.0.1:35675 2024-11-14T19:53:40,368 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:53:40,369 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data7/current/BP-1048535874-172.17.0.2-1731614004102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:40,369 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:53:40,370 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data8/current/BP-1048535874-172.17.0.2-1731614004102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:40,971 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:41,849 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:42,360 WARN [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]] 2024-11-14T19:53:42,360 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:42,360 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C38355%2C1731614005730:(num 1731614018346) roll requested 2024-11-14T19:53:42,360 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C38355%2C1731614005730.1731614022360 2024-11-14T19:53:42,363 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:42,363 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]) is bad. 
2024-11-14T19:53:42,363 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741839_1021 2024-11-14T19:53:42,364 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 after 4007ms 2024-11-14T19:53:42,366 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK] 2024-11-14T19:53:42,369 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:42,369 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]) is bad. 2024-11-14T19:53:42,369 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741840_1022 2024-11-14T19:53:42,369 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK] 2024-11-14T19:53:42,372 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40727 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:53:42,372 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:58744 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741841_1023 to mirror 127.0.0.1:40727 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:42,372 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]) is bad. 2024-11-14T19:53:42,372 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741841_1023 2024-11-14T19:53:42,372 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:58744 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T19:53:42,372 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:58744 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58744 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:42,373 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK] 2024-11-14T19:53:42,375 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T19:53:42,377 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:42,377 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:42,378 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:42,378 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:42,378 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:42,378 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614018346 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614022360 2024-11-14T19:53:42,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741838_1020 (size=3600) 2024-11-14T19:53:42,383 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41161:41161),(127.0.0.1/127.0.0.1:39365:39365)] 2024-11-14T19:53:42,383 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 is not closed yet, will try archiving it next time 2024-11-14T19:53:42,383 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614018346 is not closed yet, will try archiving it next time 2024-11-14T19:53:42,781 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 is not closed yet, will try archiving it next time 2024-11-14T19:53:42,971 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:43,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741838_1020 (size=3600) 2024-11-14T19:53:43,849 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:44,380 WARN [ResponseProcessor for block BP-1048535874-172.17.0.2-1731614004102:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1048535874-172.17.0.2-1731614004102:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:44,380 WARN [DataStreamer for file /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614022360 block BP-1048535874-172.17.0.2-1731614004102:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad. 2024-11-14T19:53:44,380 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:58748 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58748 dst: /127.0.0.1:38091 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:44,380 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:33758 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:44027:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33758 dst: /127.0.0.1:44027 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:44,383 WARN [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. 
Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]] 2024-11-14T19:53:44,383 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:44,384 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C38355%2C1731614005730:(num 1731614022360) roll requested 2024-11-14T19:53:44,384 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C38355%2C1731614005730.1731614024384 2024-11-14T19:53:44,388 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:44,388 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]) is bad. 2024-11-14T19:53:44,388 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741843_1026 2024-11-14T19:53:44,389 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK] 2024-11-14T19:53:44,390 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:44,391 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK], DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]) is bad. 2024-11-14T19:53:44,391 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741844_1027 2024-11-14T19:53:44,391 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK] 2024-11-14T19:53:44,391 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ebac93e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:44,392 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@24e1907f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:53:44,392 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:53:44,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4208698f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:53:44,392 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12380f44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,STOPPED} 2024-11-14T19:53:44,393 WARN [BP-1048535874-172.17.0.2-1731614004102 heartbeating to localhost/127.0.0.1:35675 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:53:44,393 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T19:53:44,394 WARN [BP-1048535874-172.17.0.2-1731614004102 heartbeating to localhost/127.0.0.1:35675 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1048535874-172.17.0.2-1731614004102 (Datanode Uuid 38fb90e0-8bf6-45cd-bad5-f472a065f760) service to localhost/127.0.0.1:35675 2024-11-14T19:53:44,394 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:53:44,394 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40727 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:44,394 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36442 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741845_1028 to mirror 127.0.0.1:40727 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:53:44,394 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data5/current/BP-1048535874-172.17.0.2-1731614004102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:44,394 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]) is bad. 2024-11-14T19:53:44,394 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36442 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T19:53:44,394 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741845_1028 2024-11-14T19:53:44,395 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data6/current/BP-1048535874-172.17.0.2-1731614004102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:53:44,394 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36442 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36442 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:53:44,395 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:53:44,395 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK] 2024-11-14T19:53:44,398 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44027 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:44,398 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36444 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741846_1029 to mirror 127.0.0.1:44027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:44,398 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad. 
2024-11-14T19:53:44,398 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741846_1029 2024-11-14T19:53:44,398 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36444 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T19:53:44,399 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36444 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36444 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:53:44,399 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK] 2024-11-14T19:53:44,400 WARN [IPC Server handler 1 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T19:53:44,400 WARN [IPC Server handler 1 on default port 35675 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T19:53:44,400 WARN [IPC Server handler 1 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T19:53:44,404 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:44,404 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:44,404 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:44,404 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:44,404 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:44,405 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614022360 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614024384 2024-11-14T19:53:44,406 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39365:39365)] 2024-11-14T19:53:44,406 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 is not closed yet, will try archiving it next time 2024-11-14T19:53:44,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741842_1025 (size=3600) 2024-11-14T19:53:44,406 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614022360 is not closed yet, will try archiving it next time 2024-11-14T19:53:44,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38355 {}] 
regionserver.HRegion(8855): Flush requested on 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:44,407 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2bf12d8ce7d3607de699a010d06fabfe 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T19:53:44,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/a618fe1e48ac46afa6253c49997580ca is 1080, key is row0002/info:/1731614020371/Put/seqid=0 2024-11-14T19:53:44,430 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:44,430 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK], DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]) is bad. 2024-11-14T19:53:44,430 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741848_1031 2024-11-14T19:53:44,431 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK] 2024-11-14T19:53:44,433 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:53:44,433 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]) is bad. 2024-11-14T19:53:44,433 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741849_1032 2024-11-14T19:53:44,434 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK] 2024-11-14T19:53:44,436 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40727 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:44,436 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]) is bad. 2024-11-14T19:53:44,436 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36458 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741850_1033 to mirror 127.0.0.1:40727 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:44,437 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741850_1033 2024-11-14T19:53:44,437 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36458 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T19:53:44,437 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36458 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36458 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:44,437 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK] 2024-11-14T19:53:44,440 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44027 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:53:44,440 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36464 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741851_1034 to mirror 127.0.0.1:44027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:44,440 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad. 2024-11-14T19:53:44,440 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741851_1034 2024-11-14T19:53:44,440 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36464 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T19:53:44,440 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36464 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36464 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:44,441 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK] 2024-11-14T19:53:44,442 WARN [IPC Server handler 2 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T19:53:44,442 WARN [IPC Server handler 2 on default port 35675 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T19:53:44,442 WARN [IPC Server handler 2 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T19:53:44,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741852_1035 (size=10347) 2024-11-14T19:53:44,807 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 is not closed yet, will try archiving it next time 2024-11-14T19:53:44,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/a618fe1e48ac46afa6253c49997580ca 2024-11-14T19:53:44,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/a618fe1e48ac46afa6253c49997580ca as 
hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/a618fe1e48ac46afa6253c49997580ca 2024-11-14T19:53:44,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/a618fe1e48ac46afa6253c49997580ca, entries=5, sequenceid=11, filesize=10.1 K 2024-11-14T19:53:44,866 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 2bf12d8ce7d3607de699a010d06fabfe in 458ms, sequenceid=11, compaction requested=false 2024-11-14T19:53:44,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2bf12d8ce7d3607de699a010d06fabfe: 2024-11-14T19:53:44,972 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:45,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38355 {}] regionserver.HRegion(8855): Flush requested on 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:45,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2bf12d8ce7d3607de699a010d06fabfe 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-14T19:53:45,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/55d40ee99da14e9b9c953062479c81b0 is 1080, key is row0007/info:/1731614024408/Put/seqid=0 2024-11-14T19:53:45,043 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42741 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
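
The flush sequence above (Flush requested on 2bf12d8ce7d3607de699a010d06fabfe, then "Flushing ... 1/1 column families", then "Finished flush of dataSize ~7.36 KB ... in 458ms") is driven by memstore pressure from the test's writes. The same kind of flush can also be requested explicitly through the Admin API; a sketch only, with the table name read off the HFile path in the log and the connection setup assumed rather than taken from the test harness.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Illustrative only: ask the region server to flush the test table's memstore,
    // producing the same kind of "Flushing ... / Finished flush ..." records seen above.
    public final class FlushTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
            }
        }
    }
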
2024-11-14T19:53:45,043 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36484 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741853_1036 to mirror 127.0.0.1:42741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:45,043 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]) is bad. 2024-11-14T19:53:45,043 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741853_1036 2024-11-14T19:53:45,043 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36484 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T19:53:45,043 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36484 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36484 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:45,044 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK] 2024-11-14T19:53:45,046 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40727 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:45,046 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36488 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741854_1037 to mirror 127.0.0.1:40727 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:53:45,046 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]) is bad. 2024-11-14T19:53:45,046 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741854_1037 2024-11-14T19:53:45,046 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36488 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T19:53:45,046 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36488 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36488 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:45,047 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK] 2024-11-14T19:53:45,049 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:53:45,049 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad. 2024-11-14T19:53:45,049 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741855_1038 2024-11-14T19:53:45,050 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK] 2024-11-14T19:53:45,053 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33323 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:45,053 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36504 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741856_1039 to mirror 127.0.0.1:33323 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:53:45,053 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]) is bad. 2024-11-14T19:53:45,053 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36504 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T19:53:45,053 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741856_1039 2024-11-14T19:53:45,053 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36504 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36504 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:53:45,054 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK] 2024-11-14T19:53:45,055 WARN [IPC Server handler 4 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T19:53:45,055 WARN [IPC Server handler 4 on default port 35675 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T19:53:45,055 WARN [IPC Server handler 4 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T19:53:45,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741857_1040 (size=12506) 2024-11-14T19:53:45,460 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/55d40ee99da14e9b9c953062479c81b0 2024-11-14T19:53:45,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/55d40ee99da14e9b9c953062479c81b0 as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/55d40ee99da14e9b9c953062479c81b0 2024-11-14T19:53:45,476 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/55d40ee99da14e9b9c953062479c81b0, entries=7, sequenceid=24, filesize=12.2 K 2024-11-14T19:53:45,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 2bf12d8ce7d3607de699a010d06fabfe in 442ms, sequenceid=24, compaction requested=false 2024-11-14T19:53:45,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2bf12d8ce7d3607de699a010d06fabfe: 2024-11-14T19:53:45,478 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-14T19:53:45,478 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:53:45,478 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/55d40ee99da14e9b9c953062479c81b0 because midkey is the same as first or last row 2024-11-14T19:53:45,850 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:46,232 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1cdd6ca[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38091, datanodeUuid=d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb, infoPort=39365, infoSecurePort=0, ipcPort=33099, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102):Failed to transfer BP-1048535874-172.17.0.2-1731614004102:blk_1073741842_1025 to 127.0.0.1:44027 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:46,232 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f0a2883[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38091, datanodeUuid=d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb, infoPort=39365, infoSecurePort=0, ipcPort=33099, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102):Failed to transfer BP-1048535874-172.17.0.2-1731614004102:blk_1073741852_1035 to 127.0.0.1:44027 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:46,407 WARN [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]] 2024-11-14T19:53:46,407 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:46,407 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C38355%2C1731614005730:(num 1731614024384) roll requested 2024-11-14T19:53:46,408 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C38355%2C1731614005730.1731614026408 2024-11-14T19:53:46,414 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:46,414 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK], DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]) is bad. 
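
The split-policy lines a little earlier (sumSize=22.3 K, sizeToCheck=16.0 K, regionsWithCommonTable=1, then "cannot split ... because midkey is the same as first or last row") follow the size check as it is commonly documented for IncreasingToUpperBoundRegionSplitPolicy: the threshold is the smaller of the table's max file size and initialSize multiplied by the cube of the number of co-hosted regions of the table, where initialSize defaults to twice the memstore flush size. A sketch of that arithmetic; the 16 KB initial size and the 10 GB max file size below are inferred from the log and from defaults, not read from the test configuration.

    // Sketch of the size check hinted at by the split-policy DEBUG lines above.
    // Formula as commonly documented; the constants in main() are illustrative.
    public final class SplitSizeCheck {
        static long sizeToCheck(long initialSize, long desiredMaxFileSize, int regionsWithCommonTable) {
            if (regionsWithCommonTable == 0) {
                return desiredMaxFileSize;
            }
            long cubed = (long) regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
            return Math.min(desiredMaxFileSize, initialSize * cubed);
        }

        public static void main(String[] args) {
            // With one co-hosted region and a 16 KB initial size (2 x an 8 KB flush size),
            // the threshold is 16 KB, so the 22.3 KB store above is "big enough" to split.
            System.out.println(sizeToCheck(16L * 1024, 10L * 1024 * 1024 * 1024, 1)); // 16384
        }
    }
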
2024-11-14T19:53:46,414 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741858_1041 2024-11-14T19:53:46,416 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK] 2024-11-14T19:53:46,419 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:46,419 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK], DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad. 2024-11-14T19:53:46,419 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741859_1042 2024-11-14T19:53:46,420 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK] 2024-11-14T19:53:46,424 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42741 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:53:46,424 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36524 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741860_1043 to mirror 127.0.0.1:42741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:46,425 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]) is bad. 2024-11-14T19:53:46,425 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741860_1043 2024-11-14T19:53:46,425 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36524 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T19:53:46,425 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36524 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36524 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:46,426 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK] 2024-11-14T19:53:46,427 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:46,427 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]) is bad. 
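
Each retry above ends the same way: the client abandons the block, excludes another unreachable DataNode, and the NameNode then cannot satisfy replication=2 from the single live node, while appends to the already-open WAL fail with "All datanodes ... are bad. Aborting...". That append-time behaviour is governed by the DFS client's replace-datanode-on-failure settings; the sketch below shows the relevant keys with illustrative values, not the values the mini-cluster actually uses.

    import org.apache.hadoop.conf.Configuration;

    // Sketch of the client-side knobs that decide what happens when a DataNode in an
    // open write pipeline dies. Values shown are illustrative, not the test's settings.
    public final class PipelineRecoverySettings {
        public static Configuration configure() {
            Configuration conf = new Configuration();
            // Do not try to substitute a replacement DataNode (common on tiny test clusters) ...
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
            // ... or keep the default policy but continue with fewer replicas instead of aborting.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            return conf;
        }
    }
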
2024-11-14T19:53:46,427 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741861_1044 2024-11-14T19:53:46,428 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK] 2024-11-14T19:53:46,429 WARN [IPC Server handler 3 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T19:53:46,429 WARN [IPC Server handler 3 on default port 35675 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T19:53:46,429 WARN [IPC Server handler 3 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T19:53:46,432 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:46,432 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:46,432 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:46,432 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:46,432 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:46,432 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614024384 with entries=21, filesize=20.81 KB; new WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614026408 2024-11-14T19:53:46,433 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39365:39365)] 2024-11-14T19:53:46,433 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 is not closed yet, will try archiving it next time 2024-11-14T19:53:46,433 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614024384 is not closed yet, will try archiving it next time 2024-11-14T19:53:46,434 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614018346 to hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/oldWALs/867b237d0fa7%2C38355%2C1731614005730.1731614018346 2024-11-14T19:53:46,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741847_1030 (size=21316) 2024-11-14T19:53:46,435 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614022360 to hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/oldWALs/867b237d0fa7%2C38355%2C1731614005730.1731614022360 2024-11-14T19:53:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38355 {}] regionserver.HRegion(8855): Flush requested on 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:46,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2bf12d8ce7d3607de699a010d06fabfe 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T19:53:46,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/d3169638db134ceba0b5e1ded08723a6 is 1079, key is tmprow/info:/1731614026465/Put/seqid=0 2024-11-14T19:53:46,479 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33323 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:46,479 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36538 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741863_1046 to mirror 127.0.0.1:33323 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:46,479 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]) is bad. 2024-11-14T19:53:46,479 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741863_1046 2024-11-14T19:53:46,479 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36538 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T19:53:46,479 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36538 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36538 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
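
The BlockPlacementPolicyDefault warnings above note that more detail is available once DEBUG is enabled on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology. A minimal sketch of raising those two loggers at runtime with the Log4j 2 Configurator, assuming Log4j 2 core is on the classpath (how this would be wired into the test harness is not shown in this log):

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Sketch only, not code from this run: raise the two loggers named in the
// NameNode warning above to DEBUG so the placement decisions are logged.
public final class EnablePlacementDebug {
    public static void main(String[] args) {
        Configurator.setLevel(
            "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
        Configurator.setLevel(
            "org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
    }
}
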
2024-11-14T19:53:46,480 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK] 2024-11-14T19:53:46,481 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40727 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:46,481 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36548 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741864_1047 to mirror 127.0.0.1:40727 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:46,482 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]) is bad. 
2024-11-14T19:53:46,482 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741864_1047 2024-11-14T19:53:46,482 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36548 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T19:53:46,482 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36548 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36548 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:46,482 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK] 2024-11-14T19:53:46,483 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:46,484 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK], DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad. 
2024-11-14T19:53:46,484 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741865_1048 2024-11-14T19:53:46,484 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK] 2024-11-14T19:53:46,486 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42741 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:46,486 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36554 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741866_1049 to mirror 127.0.0.1:42741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:46,486 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]) is bad. 
2024-11-14T19:53:46,486 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741866_1049 2024-11-14T19:53:46,486 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36554 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T19:53:46,487 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36554 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36554 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
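
The repeated "Connection refused" / "Excluding datanode" sequence above is what a DFSClient write pipeline logs when the datanodes it tries to use have been shut down, which is what a test named TestLogRolling-testLogRollOnDatanodeDeath is expected to arrange. A rough sketch of provoking that state with the MiniDFSCluster test utility; the datanode count, indices, and class name here are assumptions for illustration, not values taken from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Illustrative sketch: stop datanodes in a mini cluster so that in-flight write
// pipelines hit ConnectException and exclude them, as in the warnings above.
public final class DatanodeDeathSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
        try {
            cluster.waitActive();
            cluster.stopDataNode(0);
            cluster.stopDataNode(0); // list indices shift down after each stop
        } finally {
            cluster.shutdown();
        }
    }
}
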
2024-11-14T19:53:46,487 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK] 2024-11-14T19:53:46,488 WARN [IPC Server handler 0 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T19:53:46,488 WARN [IPC Server handler 0 on default port 35675 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T19:53:46,488 WARN [IPC Server handler 0 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T19:53:46,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741867_1050 (size=6027) 2024-11-14T19:53:46,836 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 is not closed yet, will try archiving it next time 2024-11-14T19:53:46,892 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/d3169638db134ceba0b5e1ded08723a6 2024-11-14T19:53:46,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/d3169638db134ceba0b5e1ded08723a6 as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/d3169638db134ceba0b5e1ded08723a6 2024-11-14T19:53:46,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/d3169638db134ceba0b5e1ded08723a6, entries=1, sequenceid=34, filesize=5.9 K 2024-11-14T19:53:46,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 2bf12d8ce7d3607de699a010d06fabfe in 440ms, 
sequenceid=34, compaction requested=true 2024-11-14T19:53:46,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2bf12d8ce7d3607de699a010d06fabfe: 2024-11-14T19:53:46,907 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-14T19:53:46,907 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:53:46,907 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/55d40ee99da14e9b9c953062479c81b0 because midkey is the same as first or last row 2024-11-14T19:53:46,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bf12d8ce7d3607de699a010d06fabfe:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T19:53:46,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:53:46,907 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T19:53:46,909 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T19:53:46,909 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.HStore(1541): 2bf12d8ce7d3607de699a010d06fabfe/info is initiating minor compaction (all files) 2024-11-14T19:53:46,909 INFO [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2bf12d8ce7d3607de699a010d06fabfe/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe. 
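
The "Committing ... .tmp/info/... as .../info/..." and "Added ..., entries=1, sequenceid=34" lines above record the final step of a memstore flush: the new HFile is written under the region's .tmp directory and then moved into the info column-family directory. A simplified sketch of that move with the Hadoop FileSystem API; the paths are shortened placeholders, and the real code path goes through HRegionFileSystem rather than a bare rename:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the commit step logged above: move the flushed HFile from the
// region's .tmp directory into the column-family directory.
public final class CommitFlushedFileSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmpFile = new Path("/hbase/data/default/t/region/.tmp/info/d3169638db134ceba0b5e1ded08723a6");
        Path storeFile = new Path("/hbase/data/default/t/region/info/d3169638db134ceba0b5e1ded08723a6");
        if (!fs.rename(tmpFile, storeFile)) {
            throw new java.io.IOException("Failed to commit " + tmpFile + " to " + storeFile);
        }
    }
}
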
2024-11-14T19:53:46,909 INFO [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/a618fe1e48ac46afa6253c49997580ca, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/55d40ee99da14e9b9c953062479c81b0, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/d3169638db134ceba0b5e1ded08723a6] into tmpdir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp, totalSize=28.2 K 2024-11-14T19:53:46,909 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] compactions.Compactor(225): Compacting a618fe1e48ac46afa6253c49997580ca, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731614020371 2024-11-14T19:53:46,910 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] compactions.Compactor(225): Compacting 55d40ee99da14e9b9c953062479c81b0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731614024408 2024-11-14T19:53:46,910 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3169638db134ceba0b5e1ded08723a6, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731614026465 2024-11-14T19:53:46,926 INFO [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bf12d8ce7d3607de699a010d06fabfe#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:53:46,927 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/b1e30288d7e343458f5ed7d847527efe is 1080, key is row0002/info:/1731614020371/Put/seqid=0 2024-11-14T19:53:46,929 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:53:46,929 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK], DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad. 2024-11-14T19:53:46,929 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741868_1051 2024-11-14T19:53:46,930 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK] 2024-11-14T19:53:46,931 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:46,931 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]) is bad. 2024-11-14T19:53:46,931 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741869_1052 2024-11-14T19:53:46,932 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK] 2024-11-14T19:53:46,933 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:46,934 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]) is bad. 2024-11-14T19:53:46,934 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741870_1053 2024-11-14T19:53:46,934 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK] 2024-11-14T19:53:46,937 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42741 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:46,937 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36592 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741871_1054 to mirror 127.0.0.1:42741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:46,937 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]) is bad. 2024-11-14T19:53:46,937 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741871_1054 2024-11-14T19:53:46,937 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36592 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T19:53:46,938 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36592 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36592 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:53:46,938 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK] 2024-11-14T19:53:46,939 WARN [IPC Server handler 3 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T19:53:46,939 WARN [IPC Server handler 3 on default port 35675 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T19:53:46,939 WARN [IPC Server handler 3 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T19:53:46,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741872_1055 (size=17994) 2024-11-14T19:53:46,972 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:47,233 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f0a2883[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38091, datanodeUuid=d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb, infoPort=39365, infoSecurePort=0, ipcPort=33099, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102):Failed to transfer BP-1048535874-172.17.0.2-1731614004102:blk_1073741857_1040 to 127.0.0.1:40727 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:47,356 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/b1e30288d7e343458f5ed7d847527efe as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/b1e30288d7e343458f5ed7d847527efe 2024-11-14T19:53:47,364 INFO [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2bf12d8ce7d3607de699a010d06fabfe/info of 2bf12d8ce7d3607de699a010d06fabfe into b1e30288d7e343458f5ed7d847527efe(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T19:53:47,364 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2bf12d8ce7d3607de699a010d06fabfe: 2024-11-14T19:53:47,364 INFO [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe., storeName=2bf12d8ce7d3607de699a010d06fabfe/info, priority=13, startTime=1731614026907; duration=0sec 2024-11-14T19:53:47,364 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T19:53:47,364 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:53:47,365 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/b1e30288d7e343458f5ed7d847527efe because midkey is the same as first or last row 2024-11-14T19:53:47,365 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T19:53:47,365 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:53:47,365 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/b1e30288d7e343458f5ed7d847527efe because midkey is the same as first or last row 2024-11-14T19:53:47,365 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 
K, sizeToCheck=16.0 K 2024-11-14T19:53:47,365 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:53:47,365 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/b1e30288d7e343458f5ed7d847527efe because midkey is the same as first or last row 2024-11-14T19:53:47,365 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:53:47,365 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bf12d8ce7d3607de699a010d06fabfe:info 2024-11-14T19:53:47,850 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:47,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38355 {}] regionserver.HRegion(8855): Flush requested on 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:53:47,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2bf12d8ce7d3607de699a010d06fabfe 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T19:53:47,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/a6c745358b6f4cbcae13a5d890685003 is 1079, key is tmprow/info:/1731614027886/Put/seqid=0 2024-11-14T19:53:47,893 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
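
The "Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K" and "cannot split ... because midkey is the same as first or last row" lines above come down to a size comparison followed by a midkey sanity check. The comparison as a minimal sketch, with illustrative byte values rather than the exact HBase implementation:

// Sketch of the size test behind the split-policy DEBUG lines above; the real
// policy additionally refuses to split when the midkey equals the first or last row.
public final class SplitSizeCheckSketch {
    static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
        return sumStoreSizeBytes > sizeToCheckBytes;
    }

    public static void main(String[] args) {
        long sumSize = 18022;      // roughly the 17.6 K reported in the log
        long sizeToCheck = 16384;  // 16.0 K
        System.out.println("should split = " + shouldSplit(sumSize, sizeToCheck));
    }
}
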
2024-11-14T19:53:47,894 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]) is bad. 2024-11-14T19:53:47,894 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741873_1056 2024-11-14T19:53:47,894 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK] 2024-11-14T19:53:47,895 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:47,896 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]) is bad. 2024-11-14T19:53:47,896 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741874_1057 2024-11-14T19:53:47,896 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK] 2024-11-14T19:53:47,899 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44027 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:53:47,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36610 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741875_1058 to mirror 127.0.0.1:44027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:47,899 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad. 2024-11-14T19:53:47,899 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741875_1058 2024-11-14T19:53:47,899 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36610 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T19:53:47,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36610 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36610 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:47,900 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK] 2024-11-14T19:53:47,903 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33323 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:47,903 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36616 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10]'}, localName='127.0.0.1:38091', datanodeUuid='d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741876_1059 to mirror 127.0.0.1:33323 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:53:47,903 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK], DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]) is bad. 2024-11-14T19:53:47,903 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36616 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T19:53:47,903 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741876_1059 2024-11-14T19:53:47,903 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1314217849_22 at /127.0.0.1:36616 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:38091:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36616 dst: /127.0.0.1:38091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
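
The pipeline-recovery attempts above, and the earlier "All datanodes [...] are bad. Aborting..." entries, are governed on the client side by the dfs.client.block.write.replace-datanode-on-failure.* settings from hdfs-default.xml. A sketch of setting them programmatically; the values chosen are illustrative only, and this log does not show what the test actually configured:

import org.apache.hadoop.conf.Configuration;

// Sketch of the client-side knobs that decide whether a failed datanode in a write
// pipeline is replaced or the stream aborts; values here are assumptions for illustration.
public final class PipelineRecoveryConfSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        System.out.println(conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
    }
}
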
2024-11-14T19:53:47,904 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33323,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK] 2024-11-14T19:53:47,905 WARN [IPC Server handler 2 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T19:53:47,905 WARN [IPC Server handler 2 on default port 35675 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T19:53:47,905 WARN [IPC Server handler 2 on default port 35675 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T19:53:47,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741877_1060 (size=6027) 2024-11-14T19:53:48,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/a6c745358b6f4cbcae13a5d890685003 2024-11-14T19:53:48,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/a6c745358b6f4cbcae13a5d890685003 as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/a6c745358b6f4cbcae13a5d890685003 2024-11-14T19:53:48,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/a6c745358b6f4cbcae13a5d890685003, entries=1, sequenceid=45, filesize=5.9 K 2024-11-14T19:53:48,325 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 2bf12d8ce7d3607de699a010d06fabfe in 438ms, sequenceid=45, compaction requested=false 2024-11-14T19:53:48,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2bf12d8ce7d3607de699a010d06fabfe: 2024-11-14T19:53:48,325 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-14T19:53:48,326 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:53:48,326 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/b1e30288d7e343458f5ed7d847527efe because midkey is the same as first or last row 2024-11-14T19:53:48,434 WARN [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-14T19:53:48,434 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:48,509 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:53:48,513 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:53:48,514 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:53:48,514 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:53:48,514 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:53:48,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e016e98{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:53:48,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dc8d3e2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:53:48,607 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@73d784f5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/java.io.tmpdir/jetty-localhost-39549-hadoop-hdfs-3_4_1-tests_jar-_-any-11178231173237709547/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:53:48,608 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d7d5058{HTTP/1.1, (http/1.1)}{localhost:39549} 2024-11-14T19:53:48,608 INFO [Time-limited test {}] server.Server(415): Started @140653ms 2024-11-14T19:53:48,609 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:53:48,972 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:49,033 WARN [Thread-986 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T19:53:49,041 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8103e347de04afa3 with lease ID 0xce2e299b1493b0f4: from storage DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd node DatanodeRegistration(127.0.0.1:42487, datanodeUuid=adcea9a4-2ef4-4d3a-bca1-bb11d75b183b, infoPort=41829, infoSecurePort=0, ipcPort=45107, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:49,041 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8103e347de04afa3 with lease ID 0xce2e299b1493b0f4: from storage DS-c07377dd-703c-4de8-aba4-82533e9b65b7 node DatanodeRegistration(127.0.0.1:42487, datanodeUuid=adcea9a4-2ef4-4d3a-bca1-bb11d75b183b, infoPort=41829, infoSecurePort=0, ipcPort=45107, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:53:49,234 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f0a2883[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38091, datanodeUuid=d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb, infoPort=39365, infoSecurePort=0, ipcPort=33099, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102):Failed to transfer BP-1048535874-172.17.0.2-1731614004102:blk_1073741847_1030 to 127.0.0.1:42741 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:49,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741867_1050 (size=6027) 2024-11-14T19:53:49,851 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:50,235 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@f0a2883[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38091, datanodeUuid=d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb, infoPort=39365, infoSecurePort=0, ipcPort=33099, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102):Failed to transfer BP-1048535874-172.17.0.2-1731614004102:blk_1073741877_1060 to 127.0.0.1:42741 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:53:50,235 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1cdd6ca[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38091, datanodeUuid=d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb, infoPort=39365, infoSecurePort=0, ipcPort=33099, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102):Failed to transfer BP-1048535874-172.17.0.2-1731614004102:blk_1073741872_1055 to 127.0.0.1:40727 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:53:50,434 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:50,973 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:51,851 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:52,435 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:52,973 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:53,852 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:54,435 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:54,974 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:55,579 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T19:53:55,852 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:56,053 ERROR [FSHLog-0-hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData-prefix:867b237d0fa7,46651,1731614005599 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:56,053 WARN [FSHLog-0-hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData-prefix:867b237d0fa7,46651,1731614005599 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:56,054 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C46651%2C1731614005599:(num 1731614005869) roll requested 2024-11-14T19:53:56,054 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C46651%2C1731614005599.1731614036054 2024-11-14T19:53:56,058 WARN [Thread-1007 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:56,059 WARN [Thread-1007 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK], DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]) is bad. 2024-11-14T19:53:56,059 WARN [Thread-1007 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741878_1061 2024-11-14T19:53:56,060 WARN [Thread-1007 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK] 2024-11-14T19:53:56,061 WARN [Thread-1007 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:53:56,062 WARN [Thread-1007 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK], DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad. 2024-11-14T19:53:56,062 WARN [Thread-1007 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741879_1062 2024-11-14T19:53:56,062 WARN [Thread-1007 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK] 2024-11-14T19:53:56,069 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:56,069 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:56,069 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:56,069 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:56,069 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:53:56,070 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/WALs/867b237d0fa7,46651,1731614005599/867b237d0fa7%2C46651%2C1731614005599.1731614005869 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/WALs/867b237d0fa7,46651,1731614005599/867b237d0fa7%2C46651%2C1731614005599.1731614036054 2024-11-14T19:53:56,070 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:56,070 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:53:56,071 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/WALs/867b237d0fa7,46651,1731614005599/867b237d0fa7%2C46651%2C1731614005599.1731614005869 2024-11-14T19:53:56,071 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39365:39365),(127.0.0.1/127.0.0.1:41829:41829)] 2024-11-14T19:53:56,071 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/WALs/867b237d0fa7,46651,1731614005599/867b237d0fa7%2C46651%2C1731614005599.1731614005869 is not closed yet, will try archiving it next time 2024-11-14T19:53:56,071 WARN [IPC Server handler 4 on default port 35675 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/WALs/867b237d0fa7,46651,1731614005599/867b237d0fa7%2C46651%2C1731614005599.1731614005869 has not been closed. Lease recovery is in progress. RecoveryId = 1064 for block blk_1073741830_1006 2024-11-14T19:53:56,072 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/WALs/867b237d0fa7,46651,1731614005599/867b237d0fa7%2C46651%2C1731614005599.1731614005869 after 1ms 2024-11-14T19:53:56,436 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:56,974 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:58,436 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:58,974 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:53:59,054 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@578bb0a2 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1048535874-172.17.0.2-1731614004102:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:42741,null,null]) java.net.ConnectException: Call From 867b237d0fa7/172.17.0.2 to localhost:46739 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T19:53:59,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741833_1019 (size=455) 2024-11-14T19:53:59,389 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614006278 to hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/oldWALs/867b237d0fa7%2C38355%2C1731614005730.1731614006278 2024-11-14T19:53:59,390 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614024384 to hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/oldWALs/867b237d0fa7%2C38355%2C1731614005730.1731614024384 2024-11-14T19:54:00,073 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/WALs/867b237d0fa7,46651,1731614005599/867b237d0fa7%2C46651%2C1731614005599.1731614005869 after 4002ms 2024-11-14T19:54:00,437 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:00,975 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:01,036 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6616838a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42487, datanodeUuid=adcea9a4-2ef4-4d3a-bca1-bb11d75b183b, infoPort=41829, infoSecurePort=0, ipcPort=45107, storageInfo=lv=-57;cid=testClusterID;nsid=17015394;c=1731614004102):Failed to transfer BP-1048535874-172.17.0.2-1731614004102:blk_1073741833_1019 to 127.0.0.1:40727 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:54:02,437 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:02,975 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:04,210 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C38355%2C1731614005730.1731614044209 2024-11-14T19:54:04,216 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44027 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:04,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_177392807_22 at /127.0.0.1:54612 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741881_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data4]'}, localName='127.0.0.1:42487', datanodeUuid='adcea9a4-2ef4-4d3a-bca1-bb11d75b183b', xmitsInProgress=0}:Exception transferring block BP-1048535874-172.17.0.2-1731614004102:blk_1073741881_1065 to mirror 127.0.0.1:44027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:54:04,216 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741881_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42487,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK], DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad. 2024-11-14T19:54:04,217 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741881_1065 2024-11-14T19:54:04,217 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_177392807_22 at /127.0.0.1:54612 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741881_1065] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T19:54:04,217 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_177392807_22 at /127.0.0.1:54612 [Receiving block BP-1048535874-172.17.0.2-1731614004102:blk_1073741881_1065] {}] datanode.DataXceiver(331): 127.0.0.1:42487:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54612 dst: /127.0.0.1:42487 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:54:04,218 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK] 2024-11-14T19:54:04,224 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:04,224 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:04,224 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:04,224 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:04,224 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:04,225 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614026408 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614044209 2024-11-14T19:54:04,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741862_1045 (size=13591) 2024-11-14T19:54:04,228 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39365:39365),(127.0.0.1/127.0.0.1:41829:41829)] 2024-11-14T19:54:04,228 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614026408 is not closed yet, will try archiving it next time 2024-11-14T19:54:04,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38355 {}] regionserver.HRegion(8855): Flush requested on 2bf12d8ce7d3607de699a010d06fabfe 2024-11-14T19:54:04,240 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2bf12d8ce7d3607de699a010d06fabfe 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T19:54:04,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/6ec55c0f3e8145bd89e8abd836f3ee0a is 1080, key is row0013/info:/1731614044229/Put/seqid=0 2024-11-14T19:54:04,247 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:54:04,247 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK], DatanodeInfoWithStorage[127.0.0.1:42487,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad.
2024-11-14T19:54:04,247 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741883_1067
2024-11-14T19:54:04,248 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]
2024-11-14T19:54:04,249 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,249 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]) is bad.
2024-11-14T19:54:04,249 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741884_1068
2024-11-14T19:54:04,250 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]
2024-11-14T19:54:04,252 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1069
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,252 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741885_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK], DatanodeInfoWithStorage[127.0.0.1:38091,DS-2f5ee03e-a805-447d-84af-222352ad5577,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]) is bad.
2024-11-14T19:54:04,252 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741885_1069
2024-11-14T19:54:04,253 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]
2024-11-14T19:54:04,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741886_1070 (size=11421)
2024-11-14T19:54:04,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741886_1070 (size=11421)
2024-11-14T19:54:04,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/6ec55c0f3e8145bd89e8abd836f3ee0a
2024-11-14T19:54:04,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/6ec55c0f3e8145bd89e8abd836f3ee0a as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/6ec55c0f3e8145bd89e8abd836f3ee0a
2024-11-14T19:54:04,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/6ec55c0f3e8145bd89e8abd836f3ee0a, entries=6, sequenceid=55, filesize=11.2 K
2024-11-14T19:54:04,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 2bf12d8ce7d3607de699a010d06fabfe in 35ms, sequenceid=55, compaction requested=true
2024-11-14T19:54:04,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2bf12d8ce7d3607de699a010d06fabfe:
2024-11-14T19:54:04,275 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K
2024-11-14T19:54:04,275 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-14T19:54:04,275 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/b1e30288d7e343458f5ed7d847527efe because midkey is the same as first or last row
2024-11-14T19:54:04,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2bf12d8ce7d3607de699a010d06fabfe:info, priority=-2147483648, current under compaction store size is 1
2024-11-14T19:54:04,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-14T19:54:04,276 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-14T19:54:04,277 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-14T19:54:04,277 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.HStore(1541): 2bf12d8ce7d3607de699a010d06fabfe/info is initiating minor compaction (all files)
2024-11-14T19:54:04,277 INFO [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2bf12d8ce7d3607de699a010d06fabfe/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.
2024-11-14T19:54:04,277 INFO [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/b1e30288d7e343458f5ed7d847527efe, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/a6c745358b6f4cbcae13a5d890685003, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/6ec55c0f3e8145bd89e8abd836f3ee0a] into tmpdir=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp, totalSize=34.6 K
2024-11-14T19:54:04,278 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] compactions.Compactor(225): Compacting b1e30288d7e343458f5ed7d847527efe, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731614020371
2024-11-14T19:54:04,278 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] compactions.Compactor(225): Compacting a6c745358b6f4cbcae13a5d890685003, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731614027886
2024-11-14T19:54:04,279 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6ec55c0f3e8145bd89e8abd836f3ee0a, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731614028292
2024-11-14T19:54:04,295 INFO [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2bf12d8ce7d3607de699a010d06fabfe#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-14T19:54:04,295 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/666660bc3ecb4fa381498f4daf3696d5 is 1080, key is row0002/info:/1731614020371/Put/seqid=0
2024-11-14T19:54:04,297 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1071
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,297 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741887_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK], DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad.
2024-11-14T19:54:04,297 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741887_1071
2024-11-14T19:54:04,298 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]
2024-11-14T19:54:04,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741888_1072 (size=23502)
2024-11-14T19:54:04,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741888_1072 (size=23502)
2024-11-14T19:54:04,311 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/666660bc3ecb4fa381498f4daf3696d5 as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/666660bc3ecb4fa381498f4daf3696d5
2024-11-14T19:54:04,319 INFO [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2bf12d8ce7d3607de699a010d06fabfe/info of 2bf12d8ce7d3607de699a010d06fabfe into 666660bc3ecb4fa381498f4daf3696d5(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-14T19:54:04,319 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2bf12d8ce7d3607de699a010d06fabfe:
2024-11-14T19:54:04,319 INFO [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe., storeName=2bf12d8ce7d3607de699a010d06fabfe/info, priority=13, startTime=1731614044275; duration=0sec
2024-11-14T19:54:04,320 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K
2024-11-14T19:54:04,320 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-14T19:54:04,320 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/666660bc3ecb4fa381498f4daf3696d5 because midkey is the same as first or last row
2024-11-14T19:54:04,320 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K
2024-11-14T19:54:04,320 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-14T19:54:04,320 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/666660bc3ecb4fa381498f4daf3696d5 because midkey is the same as first or last row
2024-11-14T19:54:04,320 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K
2024-11-14T19:54:04,320 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-14T19:54:04,320 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/666660bc3ecb4fa381498f4daf3696d5 because midkey is the same as first or last row
2024-11-14T19:54:04,320 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-14T19:54:04,320 DEBUG [RS:0;867b237d0fa7:38355-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2bf12d8ce7d3607de699a010d06fabfe:info
2024-11-14T19:54:04,438 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled.
2024-11-14T19:54:04,438 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-14T19:54:04,456 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-14T19:54:04,456 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-14T19:54:04,456 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T19:54:04,457 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T19:54:04,457 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-14T19:54:04,457 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-14T19:54:04,457 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1050820601, stopped=false
2024-11-14T19:54:04,458 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=867b237d0fa7,46651,1731614005599
2024-11-14T19:54:04,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-14T19:54:04,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-14T19:54:04,532 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37273-0x1013c169ef70002, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-14T19:54:04,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-14T19:54:04,532 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37273-0x1013c169ef70002, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-14T19:54:04,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-14T19:54:04,532 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-14T19:54:04,533 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-14T19:54:04,533 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-14T19:54:04,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T19:54:04,533 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-14T19:54:04,534 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '867b237d0fa7,38355,1731614005730' *****
2024-11-14T19:54:04,534 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-14T19:54:04,534 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-14T19:54:04,534 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '867b237d0fa7,37273,1731614006888' *****
2024-11-14T19:54:04,534 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-14T19:54:04,535 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-14T19:54:04,535 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-14T19:54:04,535 INFO [RS:0;867b237d0fa7:38355 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-14T19:54:04,535 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37273-0x1013c169ef70002, quorum=127.0.0.1:50637, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-14T19:54:04,535 INFO [RS:0;867b237d0fa7:38355 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-14T19:54:04,535 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-14T19:54:04,535 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(3091): Received CLOSE for 2bf12d8ce7d3607de699a010d06fabfe
2024-11-14T19:54:04,535 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-14T19:54:04,536 INFO [RS:1;867b237d0fa7:37273 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-14T19:54:04,536 INFO [RS:1;867b237d0fa7:37273 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-14T19:54:04,536 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(959): stopping server 867b237d0fa7,38355,1731614005730
2024-11-14T19:54:04,536 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.HRegionServer(959): stopping server 867b237d0fa7,37273,1731614006888
2024-11-14T19:54:04,536 INFO [RS:0;867b237d0fa7:38355 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-14T19:54:04,536 INFO [RS:1;867b237d0fa7:37273 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-14T19:54:04,536 INFO [RS:0;867b237d0fa7:38355 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;867b237d0fa7:38355.
2024-11-14T19:54:04,536 INFO [RS:1;867b237d0fa7:37273 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;867b237d0fa7:37273.
2024-11-14T19:54:04,536 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2bf12d8ce7d3607de699a010d06fabfe, disabling compactions & flushes
2024-11-14T19:54:04,537 DEBUG [RS:0;867b237d0fa7:38355 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-14T19:54:04,537 DEBUG [RS:1;867b237d0fa7:37273 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-14T19:54:04,537 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.
2024-11-14T19:54:04,537 DEBUG [RS:0;867b237d0fa7:38355 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T19:54:04,537 DEBUG [RS:1;867b237d0fa7:37273 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T19:54:04,537 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.
2024-11-14T19:54:04,537 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-14T19:54:04,537 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe. after waiting 0 ms
2024-11-14T19:54:04,537 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-14T19:54:04,537 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.HRegionServer(976): stopping server 867b237d0fa7,37273,1731614006888; all regions closed.
2024-11-14T19:54:04,537 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-14T19:54:04,537 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.
2024-11-14T19:54:04,537 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-14T19:54:04,538 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 2bf12d8ce7d3607de699a010d06fabfe 1/1 column families, dataSize=6.30 KB heapSize=7 KB
2024-11-14T19:54:04,538 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-14T19:54:04,538 DEBUG [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 2bf12d8ce7d3607de699a010d06fabfe=TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.}
2024-11-14T19:54:04,538 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:04,538 DEBUG [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2bf12d8ce7d3607de699a010d06fabfe
2024-11-14T19:54:04,538 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-14T19:54:04,538 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-14T19:54:04,538 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:04,538 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-14T19:54:04,538 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-14T19:54:04,538 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:04,538 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-14T19:54:04,538 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:04,538 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:04,538 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB
2024-11-14T19:54:04,539 ERROR [FSHLog-0-hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1-prefix:867b237d0fa7,38355,1731614005730.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,539 WARN [FSHLog-0-hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1-prefix:867b237d0fa7,38355,1731614005730.meta {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,539 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C38355%2C1731614005730.meta:.meta(num 1731614006711) roll requested
2024-11-14T19:54:04,539 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C38355%2C1731614005730.meta.1731614044539.meta
2024-11-14T19:54:04,542 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,542 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,542 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
2024-11-14T19:54:04,543 WARN [IPC Server handler 1 on default port 35675 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741837_1013
2024-11-14T19:54:04,543 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 after 1ms
2024-11-14T19:54:04,544 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/d46fcd189dbc497c9629d0b7d50a34ee is 1080, key is row0018/info:/1731614044242/Put/seqid=0
2024-11-14T19:54:04,548 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:04,548 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:04,548 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:04,548 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:04,548 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:04,548 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614044539.meta
2024-11-14T19:54:04,549 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,549 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,549 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
2024-11-14T19:54:04,550 WARN [IPC Server handler 4 on default port 35675 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta has not been closed. Lease recovery is in progress. RecoveryId = 1076 for block blk_1073741834_1010
2024-11-14T19:54:04,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741890_1075 (size=11421)
2024-11-14T19:54:04,550 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta after 1ms
2024-11-14T19:54:04,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741890_1075 (size=11421)
2024-11-14T19:54:04,550 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41829:41829),(127.0.0.1/127.0.0.1:39365:39365)]
2024-11-14T19:54:04,550 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta is not closed yet, will try archiving it next time
2024-11-14T19:54:04,551 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/d46fcd189dbc497c9629d0b7d50a34ee
2024-11-14T19:54:04,558 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/.tmp/info/d46fcd189dbc497c9629d0b7d50a34ee as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/d46fcd189dbc497c9629d0b7d50a34ee
2024-11-14T19:54:04,565 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/d46fcd189dbc497c9629d0b7d50a34ee, entries=6, sequenceid=65, filesize=11.2 K
2024-11-14T19:54:04,566 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 2bf12d8ce7d3607de699a010d06fabfe in 29ms, sequenceid=65, compaction requested=false
2024-11-14T19:54:04,568 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/a618fe1e48ac46afa6253c49997580ca, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/55d40ee99da14e9b9c953062479c81b0, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/b1e30288d7e343458f5ed7d847527efe, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/d3169638db134ceba0b5e1ded08723a6, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/a6c745358b6f4cbcae13a5d890685003, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/6ec55c0f3e8145bd89e8abd836f3ee0a] to archive
2024-11-14T19:54:04,569 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/.tmp/info/1d6a02ee04ce4d13a0ab6041d391a9cb is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe./info:regioninfo/1731614007376/Put/seqid=0
2024-11-14T19:54:04,569 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-11-14T19:54:04,571 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,571 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK], DatanodeInfoWithStorage[127.0.0.1:42487,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]) is bad.
2024-11-14T19:54:04,571 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741891_1077
2024-11-14T19:54:04,572 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/a618fe1e48ac46afa6253c49997580ca to hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/a618fe1e48ac46afa6253c49997580ca
2024-11-14T19:54:04,572 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40727,DS-dd69739c-8186-4404-b0a6-491b63108c4d,DISK]
2024-11-14T19:54:04,573 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1078
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,573 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741892_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK], DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]) is bad.
2024-11-14T19:54:04,573 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/55d40ee99da14e9b9c953062479c81b0 to hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/55d40ee99da14e9b9c953062479c81b0
2024-11-14T19:54:04,573 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741892_1078
2024-11-14T19:54:04,574 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42741,DS-a2f8445c-e6dc-46f3-8a87-a0805f5599f0,DISK]
2024-11-14T19:54:04,574 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/b1e30288d7e343458f5ed7d847527efe to hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/b1e30288d7e343458f5ed7d847527efe
2024-11-14T19:54:04,575 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1079
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:04,575 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741893_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK], DatanodeInfoWithStorage[127.0.0.1:42487,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad.
2024-11-14T19:54:04,575 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741893_1079
2024-11-14T19:54:04,576 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]
2024-11-14T19:54:04,576 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/d3169638db134ceba0b5e1ded08723a6 to hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/d3169638db134ceba0b5e1ded08723a6
2024-11-14T19:54:04,577 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/a6c745358b6f4cbcae13a5d890685003 to hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/a6c745358b6f4cbcae13a5d890685003
2024-11-14T19:54:04,579 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/6ec55c0f3e8145bd89e8abd836f3ee0a to hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/info/6ec55c0f3e8145bd89e8abd836f3ee0a
2024-11-14T19:54:04,579 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=867b237d0fa7:46651 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-11-14T19:54:04,580 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a618fe1e48ac46afa6253c49997580ca=10347, 55d40ee99da14e9b9c953062479c81b0=12506, b1e30288d7e343458f5ed7d847527efe=17994, d3169638db134ceba0b5e1ded08723a6=6027, a6c745358b6f4cbcae13a5d890685003=6027, 6ec55c0f3e8145bd89e8abd836f3ee0a=11421]
2024-11-14T19:54:04,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741894_1080 (size=7089)
2024-11-14T19:54:04,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741894_1080 (size=7089)
2024-11-14T19:54:04,582 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/.tmp/info/1d6a02ee04ce4d13a0ab6041d391a9cb
2024-11-14T19:54:04,584 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2bf12d8ce7d3607de699a010d06fabfe/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1
2024-11-14T19:54:04,585 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.
2024-11-14T19:54:04,585 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2bf12d8ce7d3607de699a010d06fabfe: Waiting for close lock at 1731614044536Running coprocessor pre-close hooks at 1731614044536Disabling compacts and flushes for region at 1731614044536Disabling writes for close at 1731614044537 (+1 ms)Obtaining lock to block concurrent updates at 1731614044538 (+1 ms)Preparing flush snapshotting stores in 2bf12d8ce7d3607de699a010d06fabfe at 1731614044538Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1731614044538Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe.
at 1731614044539 (+1 ms)Flushing 2bf12d8ce7d3607de699a010d06fabfe/info: creating writer at 1731614044540 (+1 ms)Flushing 2bf12d8ce7d3607de699a010d06fabfe/info: appending metadata at 1731614044544 (+4 ms)Flushing 2bf12d8ce7d3607de699a010d06fabfe/info: closing flushed file at 1731614044544Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e67ad60: reopening flushed file at 1731614044557 (+13 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 2bf12d8ce7d3607de699a010d06fabfe in 29ms, sequenceid=65, compaction requested=false at 1731614044566 (+9 ms)Writing region close event to WAL at 1731614044580 (+14 ms)Running coprocessor post-close hooks at 1731614044585 (+5 ms)Closed at 1731614044585 2024-11-14T19:54:04,585 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731614007001.2bf12d8ce7d3607de699a010d06fabfe. 2024-11-14T19:54:04,604 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/.tmp/ns/4795432a3f314f8fb81865e6618fab0f is 43, key is default/ns:d/1731614006786/Put/seqid=0 2024-11-14T19:54:04,606 WARN [IPC Server handler 4 on default port 35675 {}] net.NetworkTopology(357): The cluster does not contain node: /default-rack/127.0.0.1:44027 2024-11-14T19:54:04,606 WARN [IPC Server handler 4 on default port 35675 {}] net.NetworkTopology(357): The cluster does not contain node: /default-rack/127.0.0.1:44027 2024-11-14T19:54:04,607 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:04,607 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1048535874-172.17.0.2-1731614004102:blk_1073741895_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK], DatanodeInfoWithStorage[127.0.0.1:42487,DS-18b8efbf-59c6-4fba-898e-3f0e3dcbc9bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK]) is bad. 
2024-11-14T19:54:04,607 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-1048535874-172.17.0.2-1731614004102:blk_1073741895_1081 2024-11-14T19:54:04,608 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7c03f61-fab7-49db-a217-682cac712c93,DISK] 2024-11-14T19:54:04,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741896_1082 (size=5153) 2024-11-14T19:54:04,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741896_1082 (size=5153) 2024-11-14T19:54:04,612 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/.tmp/ns/4795432a3f314f8fb81865e6618fab0f 2024-11-14T19:54:04,627 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.1731614026408 to hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/oldWALs/867b237d0fa7%2C38355%2C1731614005730.1731614026408 2024-11-14T19:54:04,637 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/.tmp/table/96c2a2a1e2504ae59c637ef4c9cb080e is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731614007387/Put/seqid=0 2024-11-14T19:54:04,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741897_1083 (size=5424) 2024-11-14T19:54:04,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741897_1083 (size=5424) 2024-11-14T19:54:04,644 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/.tmp/table/96c2a2a1e2504ae59c637ef4c9cb080e 2024-11-14T19:54:04,657 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/.tmp/info/1d6a02ee04ce4d13a0ab6041d391a9cb as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/info/1d6a02ee04ce4d13a0ab6041d391a9cb 2024-11-14T19:54:04,665 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/info/1d6a02ee04ce4d13a0ab6041d391a9cb, entries=10, sequenceid=11, filesize=6.9 K 2024-11-14T19:54:04,666 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/.tmp/ns/4795432a3f314f8fb81865e6618fab0f as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/ns/4795432a3f314f8fb81865e6618fab0f 2024-11-14T19:54:04,673 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/ns/4795432a3f314f8fb81865e6618fab0f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T19:54:04,675 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/.tmp/table/96c2a2a1e2504ae59c637ef4c9cb080e as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/table/96c2a2a1e2504ae59c637ef4c9cb080e 2024-11-14T19:54:04,682 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/table/96c2a2a1e2504ae59c637ef4c9cb080e, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T19:54:04,683 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 145ms, sequenceid=11, compaction requested=false 2024-11-14T19:54:04,689 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T19:54:04,690 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:54:04,690 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:54:04,690 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731614044538Running coprocessor pre-close hooks at 1731614044538Disabling compacts and flushes for region at 1731614044538Disabling writes for close at 1731614044538Obtaining lock to block concurrent updates at 1731614044538Preparing flush snapshotting stores in 1588230740 at 1731614044538Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731614044539 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731614044551 (+12 ms)Flushing 1588230740/info: creating writer at 1731614044551Flushing 1588230740/info: appending metadata at 1731614044568 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731614044568Flushing 1588230740/ns: creating writer at 1731614044588 (+20 ms)Flushing 1588230740/ns: appending metadata at 1731614044604 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731614044604Flushing 
1588230740/table: creating writer at 1731614044620 (+16 ms)Flushing 1588230740/table: appending metadata at 1731614044637 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731614044637Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6da58d1c: reopening flushed file at 1731614044655 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f85d7b5: reopening flushed file at 1731614044665 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2547a8fb: reopening flushed file at 1731614044674 (+9 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 145ms, sequenceid=11, compaction requested=false at 1731614044683 (+9 ms)Writing region close event to WAL at 1731614044685 (+2 ms)Running coprocessor post-close hooks at 1731614044690 (+5 ms)Closed at 1731614044690 2024-11-14T19:54:04,690 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T19:54:04,738 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(976): stopping server 867b237d0fa7,38355,1731614005730; all regions closed. 2024-11-14T19:54:04,739 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:04,739 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:04,739 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:04,739 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:04,739 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:04,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741889_1073 (size=825) 2024-11-14T19:54:04,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741889_1073 (size=825) 2024-11-14T19:54:04,970 INFO [regionserver/867b237d0fa7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T19:54:04,971 INFO [regionserver/867b237d0fa7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T19:54:04,972 INFO [regionserver/867b237d0fa7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:54:05,148 INFO [regionserver/867b237d0fa7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T19:54:05,148 INFO [regionserver/867b237d0fa7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T19:54:06,147 INFO [regionserver/867b237d0fa7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:54:06,883 INFO [master/867b237d0fa7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T19:54:06,883 INFO [master/867b237d0fa7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-14T19:54:07,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741836_1012 (size=76) 2024-11-14T19:54:07,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:54:07,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741862_1045 (size=13591) 2024-11-14T19:54:08,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:54:08,546 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 after 4003ms 2024-11-14T19:54:08,552 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta after 4003ms 2024-11-14T19:54:09,061 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@45579bcb {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1048535874-172.17.0.2-1731614004102:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:42741,null,null]) java.net.ConnectException: Call From 867b237d0fa7/172.17.0.2 to localhost:46739 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T19:54:09,542 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-14T19:54:09,548 DEBUG [RS:1;867b237d0fa7:37273 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/oldWALs 2024-11-14T19:54:09,549 INFO [RS:1;867b237d0fa7:37273 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C37273%2C1731614006888:(num 1731614007102) 2024-11-14T19:54:09,549 DEBUG [RS:1;867b237d0fa7:37273 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:54:09,549 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:54:09,549 INFO [RS:1;867b237d0fa7:37273 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:54:09,550 INFO [RS:1;867b237d0fa7:37273 {}] hbase.ChoreService(370): Chore service for: regionserver/867b237d0fa7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T19:54:09,550 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T19:54:09,550 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T19:54:09,550 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-14T19:54:09,550 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T19:54:09,551 INFO [RS:1;867b237d0fa7:37273 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:54:09,551 INFO [RS:1;867b237d0fa7:37273 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37273 2024-11-14T19:54:09,586 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:09,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:54:09,598 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37273-0x1013c169ef70002, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/867b237d0fa7,37273,1731614006888 2024-11-14T19:54:09,598 INFO [RS:1;867b237d0fa7:37273 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:54:09,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:09,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:09,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:09,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:09,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:09,606 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [867b237d0fa7,37273,1731614006888] 2024-11-14T19:54:09,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:09,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:09,614 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/867b237d0fa7,37273,1731614006888 already deleted, retry=false 2024-11-14T19:54:09,614 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 867b237d0fa7,37273,1731614006888 expired; onlineServers=1 2024-11-14T19:54:09,706 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37273-0x1013c169ef70002, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:54:09,706 INFO [RS:1;867b237d0fa7:37273 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:54:09,707 DEBUG [pool-381-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37273-0x1013c169ef70002, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:54:09,707 INFO [RS:1;867b237d0fa7:37273 {}] regionserver.HRegionServer(1031): Exiting; stopping=867b237d0fa7,37273,1731614006888; zookeeper connection closed. 2024-11-14T19:54:09,707 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4d86ffe6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4d86ffe6 2024-11-14T19:54:09,740 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-14T19:54:09,750 DEBUG [RS:0;867b237d0fa7:38355 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/oldWALs 2024-11-14T19:54:09,750 INFO [RS:0;867b237d0fa7:38355 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C38355%2C1731614005730.meta:.meta(num 1731614044539) 2024-11-14T19:54:09,751 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:09,751 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:09,751 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:09,751 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:09,751 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:09,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741882_1066 (size=15140) 2024-11-14T19:54:09,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741882_1066 (size=15140) 2024-11-14T19:54:09,755 DEBUG [RS:0;867b237d0fa7:38355 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/oldWALs 2024-11-14T19:54:09,755 INFO [RS:0;867b237d0fa7:38355 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C38355%2C1731614005730:(num 1731614044209) 2024-11-14T19:54:09,755 DEBUG [RS:0;867b237d0fa7:38355 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:54:09,755 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:54:09,755 INFO [RS:0;867b237d0fa7:38355 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:54:09,756 INFO [RS:0;867b237d0fa7:38355 {}] hbase.ChoreService(370): Chore service for: 
regionserver/867b237d0fa7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T19:54:09,756 INFO [RS:0;867b237d0fa7:38355 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:54:09,756 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T19:54:09,756 INFO [RS:0;867b237d0fa7:38355 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38355 2024-11-14T19:54:09,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/867b237d0fa7,38355,1731614005730 2024-11-14T19:54:09,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:54:09,767 INFO [RS:0;867b237d0fa7:38355 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:54:09,775 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [867b237d0fa7,38355,1731614005730] 2024-11-14T19:54:09,783 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/867b237d0fa7,38355,1731614005730 already deleted, retry=false 2024-11-14T19:54:09,783 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 867b237d0fa7,38355,1731614005730 expired; onlineServers=0 2024-11-14T19:54:09,784 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '867b237d0fa7,46651,1731614005599' ***** 2024-11-14T19:54:09,784 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T19:54:09,784 INFO [M:0;867b237d0fa7:46651 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T19:54:09,784 INFO [M:0;867b237d0fa7:46651 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:54:09,784 DEBUG [M:0;867b237d0fa7:46651 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T19:54:09,784 DEBUG [M:0;867b237d0fa7:46651 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T19:54:09,784 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T19:54:09,784 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614006053 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614006053,5,FailOnTimeoutGroup] 2024-11-14T19:54:09,784 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614006053 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614006053,5,FailOnTimeoutGroup] 2024-11-14T19:54:09,784 INFO [M:0;867b237d0fa7:46651 {}] hbase.ChoreService(370): Chore service for: master/867b237d0fa7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T19:54:09,784 INFO [M:0;867b237d0fa7:46651 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:54:09,785 DEBUG [M:0;867b237d0fa7:46651 {}] master.HMaster(1795): Stopping service threads 2024-11-14T19:54:09,785 INFO [M:0;867b237d0fa7:46651 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T19:54:09,785 INFO [M:0;867b237d0fa7:46651 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T19:54:09,785 INFO [M:0;867b237d0fa7:46651 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T19:54:09,785 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T19:54:09,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T19:54:09,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:09,792 DEBUG [M:0;867b237d0fa7:46651 {}] zookeeper.ZKUtil(347): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T19:54:09,792 WARN [M:0;867b237d0fa7:46651 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T19:54:09,793 INFO [M:0;867b237d0fa7:46651 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/.lastflushedseqids 2024-11-14T19:54:09,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741898_1084 (size=130) 2024-11-14T19:54:09,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741898_1084 (size=130) 2024-11-14T19:54:09,803 INFO [M:0;867b237d0fa7:46651 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T19:54:09,803 INFO [M:0;867b237d0fa7:46651 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T19:54:09,803 DEBUG [M:0;867b237d0fa7:46651 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:54:09,803 INFO [M:0;867b237d0fa7:46651 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:54:09,803 DEBUG [M:0;867b237d0fa7:46651 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:54:09,804 DEBUG [M:0;867b237d0fa7:46651 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:54:09,804 DEBUG [M:0;867b237d0fa7:46651 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:54:09,804 INFO [M:0;867b237d0fa7:46651 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-14T19:54:09,824 DEBUG [M:0;867b237d0fa7:46651 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c95ababa67a49cc94900cc3fdaa9d92 is 82, key is hbase:meta,,1/info:regioninfo/1731614006743/Put/seqid=0 2024-11-14T19:54:09,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741899_1085 (size=5672) 2024-11-14T19:54:09,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741899_1085 (size=5672) 2024-11-14T19:54:09,830 INFO [M:0;867b237d0fa7:46651 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c95ababa67a49cc94900cc3fdaa9d92 2024-11-14T19:54:09,853 DEBUG [M:0;867b237d0fa7:46651 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6cfc012996074a94ae6df73b53f78fdf is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731614007393/Put/seqid=0 2024-11-14T19:54:09,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741900_1086 (size=6255) 2024-11-14T19:54:09,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741900_1086 (size=6255) 2024-11-14T19:54:09,858 INFO [M:0;867b237d0fa7:46651 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6cfc012996074a94ae6df73b53f78fdf 2024-11-14T19:54:09,865 INFO [M:0;867b237d0fa7:46651 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6cfc012996074a94ae6df73b53f78fdf 2024-11-14T19:54:09,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:54:09,875 INFO [RS:0;867b237d0fa7:38355 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:54:09,875 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:38355-0x1013c169ef70001, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:54:09,875 INFO [RS:0;867b237d0fa7:38355 {}] regionserver.HRegionServer(1031): Exiting; stopping=867b237d0fa7,38355,1731614005730; zookeeper connection closed. 2024-11-14T19:54:09,876 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@b1c6bf5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@b1c6bf5 2024-11-14T19:54:09,876 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-14T19:54:09,882 DEBUG [M:0;867b237d0fa7:46651 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b91734d290d84f7dae6183ff9ca400ac is 69, key is 867b237d0fa7,37273,1731614006888/rs:state/1731614006952/Put/seqid=0 2024-11-14T19:54:09,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741901_1087 (size=5224) 2024-11-14T19:54:09,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741901_1087 (size=5224) 2024-11-14T19:54:09,888 INFO [M:0;867b237d0fa7:46651 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b91734d290d84f7dae6183ff9ca400ac 2024-11-14T19:54:09,908 DEBUG [M:0;867b237d0fa7:46651 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f0bf89e2988a45d8bb36cfbc53ae92dc is 52, key is load_balancer_on/state:d/1731614006873/Put/seqid=0 2024-11-14T19:54:09,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741902_1088 (size=5056) 2024-11-14T19:54:09,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741902_1088 (size=5056) 2024-11-14T19:54:09,914 INFO [M:0;867b237d0fa7:46651 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f0bf89e2988a45d8bb36cfbc53ae92dc 2024-11-14T19:54:09,921 DEBUG [M:0;867b237d0fa7:46651 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7c95ababa67a49cc94900cc3fdaa9d92 as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c95ababa67a49cc94900cc3fdaa9d92 2024-11-14T19:54:09,926 INFO [M:0;867b237d0fa7:46651 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7c95ababa67a49cc94900cc3fdaa9d92, entries=8, sequenceid=60, filesize=5.5 K 2024-11-14T19:54:09,927 DEBUG [M:0;867b237d0fa7:46651 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6cfc012996074a94ae6df73b53f78fdf as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6cfc012996074a94ae6df73b53f78fdf 2024-11-14T19:54:09,932 INFO [M:0;867b237d0fa7:46651 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6cfc012996074a94ae6df73b53f78fdf 2024-11-14T19:54:09,933 INFO [M:0;867b237d0fa7:46651 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6cfc012996074a94ae6df73b53f78fdf, entries=6, sequenceid=60, filesize=6.1 K 2024-11-14T19:54:09,934 DEBUG [M:0;867b237d0fa7:46651 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b91734d290d84f7dae6183ff9ca400ac as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b91734d290d84f7dae6183ff9ca400ac 2024-11-14T19:54:09,939 INFO [M:0;867b237d0fa7:46651 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b91734d290d84f7dae6183ff9ca400ac, entries=2, sequenceid=60, filesize=5.1 K 2024-11-14T19:54:09,940 DEBUG [M:0;867b237d0fa7:46651 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f0bf89e2988a45d8bb36cfbc53ae92dc as hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f0bf89e2988a45d8bb36cfbc53ae92dc 2024-11-14T19:54:09,945 INFO [M:0;867b237d0fa7:46651 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f0bf89e2988a45d8bb36cfbc53ae92dc, entries=1, sequenceid=60, filesize=4.9 K 2024-11-14T19:54:09,946 INFO [M:0;867b237d0fa7:46651 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=60, compaction requested=false 2024-11-14T19:54:09,948 INFO [M:0;867b237d0fa7:46651 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T19:54:09,948 DEBUG [M:0;867b237d0fa7:46651 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731614049803Disabling compacts and flushes for region at 1731614049803Disabling writes for close at 1731614049804 (+1 ms)Obtaining lock to block concurrent updates at 1731614049804Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731614049804Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731614049804Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731614049806 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731614049806Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731614049824 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731614049824Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731614049836 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731614049852 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731614049852Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731614049865 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731614049881 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731614049881Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731614049893 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731614049907 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731614049907Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dc7422: reopening flushed file at 1731614049920 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c39160: reopening flushed file at 1731614049926 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f255ad6: reopening flushed file at 1731614049933 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33ec416c: reopening flushed file at 1731614049939 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=60, compaction requested=false at 1731614049946 (+7 ms)Writing region close event to WAL at 1731614049948 (+2 ms)Closed at 1731614049948 2024-11-14T19:54:09,949 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:09,949 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:09,949 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:09,949 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:09,949 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:09,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42487 is added to blk_1073741880_1063 (size=1045) 2024-11-14T19:54:09,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741880_1063 (size=1045) 2024-11-14T19:54:09,952 INFO [M:0;867b237d0fa7:46651 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-14T19:54:09,952 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T19:54:09,952 INFO [M:0;867b237d0fa7:46651 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46651 2024-11-14T19:54:09,953 INFO [M:0;867b237d0fa7:46651 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:54:10,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741835_1011 (size=393) 2024-11-14T19:54:10,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38091 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:54:10,058 INFO [M:0;867b237d0fa7:46651 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:54:10,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:54:10,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46651-0x1013c169ef70000, quorum=127.0.0.1:50637, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:54:10,084 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@73d784f5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:10,084 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d7d5058{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:54:10,085 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:54:10,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dc8d3e2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:54:10,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e016e98{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,STOPPED} 2024-11-14T19:54:10,086 WARN [BP-1048535874-172.17.0.2-1731614004102 heartbeating to localhost/127.0.0.1:35675 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:54:10,087 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T19:54:10,087 WARN [BP-1048535874-172.17.0.2-1731614004102 heartbeating to localhost/127.0.0.1:35675 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1048535874-172.17.0.2-1731614004102 (Datanode Uuid adcea9a4-2ef4-4d3a-bca1-bb11d75b183b) service to localhost/127.0.0.1:35675 2024-11-14T19:54:10,087 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:54:10,086 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3d20f64b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1048535874-172.17.0.2-1731614004102:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:42741,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:46739 , LocalHost:localPort 867b237d0fa7/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T19:54:10,087 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3d20f64b {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1048535874-172.17.0.2-1731614004102:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:42487,null,null], DatanodeInfoWithStorage[127.0.0.1:42741,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1048535874-172.17.0.2-1731614004102 2024-11-14T19:54:10,088 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data3/current/BP-1048535874-172.17.0.2-1731614004102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:10,088 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3d20f64b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1048535874-172.17.0.2-1731614004102:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:42741,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1048535874-172.17.0.2-1731614004102 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
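The exception chain above is the IPC client's fixed-sleep retry loop being interrupted while it waits to reconnect: the RetryAction says "retry after 1000 ms", the sleep is interrupted by the shutdown, and the InterruptedException is rethrown wrapped in an InterruptedIOException. A minimal stand-alone model of that behaviour (an illustration only, not the actual org.apache.hadoop.ipc.Client code; connectWithRetries and its parameters are hypothetical names) looks like this:

    import java.io.IOException;
    import java.io.InterruptedIOException;

    final class FixedSleepRetrySketch {
      // Simplified model of RetryUpToMaximumCountWithFixedSleep: retry a
      // failing connection up to maxRetries times, sleeping a fixed delay
      // between attempts, and surface an interrupt of that sleep as an
      // InterruptedIOException wrapping the InterruptedException.
      static void connectWithRetries(Runnable connectOnce, int maxRetries, long sleepMillis)
          throws IOException {
        for (int attempt = 0; ; attempt++) {
          try {
            connectOnce.run();           // try to set up the connection once
            return;                      // success
          } catch (RuntimeException connectFailure) {
            if (attempt >= maxRetries) {
              throw new IOException(
                  "exceeded maximum allowed retries number: " + maxRetries, connectFailure);
            }
            try {
              Thread.sleep(sleepMillis); // fixed back-off between attempts
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt();
              InterruptedIOException iioe = new InterruptedIOException(
                  "Interrupted while waiting to retry (attempt " + attempt + ")");
              iioe.initCause(ie);        // keeps "sleep interrupted" as the root cause
              throw iioe;
            }
          }
        }
      }
    }

During teardown this path is expected: the recovery and heartbeat threads are interrupted deliberately, so the retry loop aborts early instead of sleeping through its remaining attempts.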
2024-11-14T19:54:10,088 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data4/current/BP-1048535874-172.17.0.2-1731614004102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-14T19:54:10,088 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3d20f64b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1048535874-172.17.0.2-1731614004102:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:42487,null,null])
java.io.IOException: No block pool offer service for bpid=BP-1048535874-172.17.0.2-1731614004102
 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?]
 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?]
 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?]
 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?]
 at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-14T19:54:10,088 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3d20f64b {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1048535874-172.17.0.2-1731614004102:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:42741,null,null], DatanodeInfoWithStorage[127.0.0.1:42487,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1048535874-172.17.0.2-1731614004102:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:42741,null,null], DatanodeInfoWithStorage[127.0.0.1:42487,null,null]]
2024-11-14T19:54:10,088 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-14T19:54:10,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b23fe4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-14T19:54:10,092 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9de090{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-14T19:54:10,092 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-14T19:54:10,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c34cffb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-14T19:54:10,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d396542{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,STOPPED}
2024-11-14T19:54:10,094 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
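The per-datanode failures above collapse into a single "All datanodes failed" result because block recovery only gives up once every replica location has been tried. A simplified model of that aggregation (illustrative only; BlockRecoverySketch, ReplicaProbe and recover are invented names, not the hadoop-hdfs BlockRecoveryWorker API) is:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    final class BlockRecoverySketch {
      interface ReplicaProbe {
        void initReplicaRecovery(String datanode) throws IOException;
      }

      // Try every replica location; log each per-node failure, and only if
      // no node succeeds report the recovery of the whole block as failed.
      static void recover(String block, List<String> datanodes, ReplicaProbe probe)
          throws IOException {
        List<String> successes = new ArrayList<>();
        for (String dn : datanodes) {
          try {
            probe.initReplicaRecovery(dn);   // ask this datanode about its replica
            successes.add(dn);
          } catch (IOException e) {
            // corresponds to the per-datanode "Failed to recover block" WARN entries
            System.err.println("Failed to recover block (block=" + block
                + ", datanode=" + dn + "): " + e);
          }
        }
        if (successes.isEmpty()) {
          // corresponds to the final "FAILED: ... All datanodes failed" entry
          throw new IOException("All datanodes failed: block=" + block
              + ", datanodeids=" + datanodes);
        }
      }
    }

Here every probe fails with "No block pool offer service" because the block pool has already been unregistered during shutdown, so the aggregate failure is expected rather than a data-loss signal.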
2024-11-14T19:54:10,094 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:54:10,094 WARN [BP-1048535874-172.17.0.2-1731614004102 heartbeating to localhost/127.0.0.1:35675 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:54:10,094 WARN [BP-1048535874-172.17.0.2-1731614004102 heartbeating to localhost/127.0.0.1:35675 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1048535874-172.17.0.2-1731614004102 (Datanode Uuid d39aab7b-41ea-4b5b-afc8-f25e3f1f85eb) service to localhost/127.0.0.1:35675 2024-11-14T19:54:10,095 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data9/current/BP-1048535874-172.17.0.2-1731614004102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:10,095 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/cluster_53996dea-7e23-6074-a468-9d83e511fe7f/data/data10/current/BP-1048535874-172.17.0.2-1731614004102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:10,095 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:54:10,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ac8078c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:54:10,102 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@44248139{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:54:10,102 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:54:10,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2487e01a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:54:10,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@279773e8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir/,STOPPED} 2024-11-14T19:54:10,112 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T19:54:10,116 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T19:54:10,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:10,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:10,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:10,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:10,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:10,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:10,146 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:10,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:10,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:10,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:10,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T19:54:10,178 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 79) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35675 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:35675 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33087 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f8440bf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:35675 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35675 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:35675 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:35675 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35675 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35675 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:35675 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f8440bf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:33087 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35675 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:35675 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=240 (was 275), ProcessCount=11 (was 11), AvailableMemoryMB=5649 (was 6128) 2024-11-14T19:54:10,185 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=240, ProcessCount=11, AvailableMemoryMB=5649 2024-11-14T19:54:10,185 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T19:54:10,185 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.log.dir so I do NOT create it in target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1 2024-11-14T19:54:10,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a0e66b61-238e-1f0d-25d6-4cd0022d35cc/hadoop.tmp.dir so I do NOT create it in target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1 2024-11-14T19:54:10,186 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab, deleteOnExit=true 2024-11-14T19:54:10,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T19:54:10,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/test.cache.data in system properties and HBase conf 2024-11-14T19:54:10,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T19:54:10,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir in system properties and HBase conf 2024-11-14T19:54:10,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T19:54:10,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T19:54:10,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T19:54:10,186 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-14T19:54:10,186 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/nfs.dump.dir in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/java.io.tmpdir in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T19:54:10,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T19:54:10,202 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:54:10,419 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:54:10,425 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:54:10,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:54:10,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:54:10,430 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:54:10,432 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:54:10,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13514823{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:54:10,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@349c14ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:54:10,536 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@412993b5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/java.io.tmpdir/jetty-localhost-42533-hadoop-hdfs-3_4_1-tests_jar-_-any-2580484973483033595/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:54:10,536 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4abaeae4{HTTP/1.1, (http/1.1)}{localhost:42533} 2024-11-14T19:54:10,536 INFO [Time-limited test {}] server.Server(415): Started @162582ms 2024-11-14T19:54:10,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:10,553 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:54:10,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:54:10,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T19:54:10,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:54:10,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T19:54:10,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T19:54:10,711 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:54:10,716 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:54:10,722 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:54:10,722 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:54:10,722 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:54:10,723 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5267df65{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:54:10,723 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6cb794bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:54:10,852 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5514adb5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/java.io.tmpdir/jetty-localhost-43685-hadoop-hdfs-3_4_1-tests_jar-_-any-13081002486514289348/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:10,853 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@362b6215{HTTP/1.1, (http/1.1)}{localhost:43685} 2024-11-14T19:54:10,854 INFO [Time-limited test {}] server.Server(415): Started @162900ms 2024-11-14T19:54:10,856 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:54:10,893 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:54:10,896 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:54:10,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:54:10,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:54:10,897 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T19:54:10,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77037bf8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:54:10,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69151493{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:54:11,015 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a305799{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/java.io.tmpdir/jetty-localhost-45745-hadoop-hdfs-3_4_1-tests_jar-_-any-10848413979999511938/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:11,015 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d6e24b{HTTP/1.1, (http/1.1)}{localhost:45745} 2024-11-14T19:54:11,015 INFO [Time-limited test {}] server.Server(415): Started @163061ms 2024-11-14T19:54:11,017 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:54:11,509 WARN [Thread-1202 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data2/current/BP-63474148-172.17.0.2-1731614050215/current, will proceed with Du for space computation calculation, 2024-11-14T19:54:11,509 WARN [Thread-1201 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data1/current/BP-63474148-172.17.0.2-1731614050215/current, will proceed with Du for space computation calculation, 2024-11-14T19:54:11,526 WARN [Thread-1166 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T19:54:11,529 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xee0bf6a92e32e644 with lease ID 0x6fe81fce59c64c90: Processing first storage report for DS-b46607e9-a713-4575-9edb-111f698c75be from datanode DatanodeRegistration(127.0.0.1:34575, datanodeUuid=9fab9f93-3f7c-4a9f-9efb-34f9d8de144f, infoPort=39001, infoSecurePort=0, ipcPort=34993, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215) 2024-11-14T19:54:11,529 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xee0bf6a92e32e644 with lease ID 0x6fe81fce59c64c90: from storage DS-b46607e9-a713-4575-9edb-111f698c75be node DatanodeRegistration(127.0.0.1:34575, datanodeUuid=9fab9f93-3f7c-4a9f-9efb-34f9d8de144f, infoPort=39001, infoSecurePort=0, ipcPort=34993, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:11,529 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xee0bf6a92e32e644 with lease ID 0x6fe81fce59c64c90: Processing first storage report for DS-49c94482-bbeb-4683-9b69-09a70914bbd1 from datanode DatanodeRegistration(127.0.0.1:34575, datanodeUuid=9fab9f93-3f7c-4a9f-9efb-34f9d8de144f, infoPort=39001, infoSecurePort=0, ipcPort=34993, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215) 2024-11-14T19:54:11,529 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xee0bf6a92e32e644 with lease ID 0x6fe81fce59c64c90: from storage DS-49c94482-bbeb-4683-9b69-09a70914bbd1 node DatanodeRegistration(127.0.0.1:34575, datanodeUuid=9fab9f93-3f7c-4a9f-9efb-34f9d8de144f, infoPort=39001, infoSecurePort=0, ipcPort=34993, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:11,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:11,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:54:11,692 WARN [Thread-1213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data3/current/BP-63474148-172.17.0.2-1731614050215/current, will proceed with Du for space computation calculation, 2024-11-14T19:54:11,692 WARN [Thread-1214 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data4/current/BP-63474148-172.17.0.2-1731614050215/current, will proceed with Du for space computation calculation, 2024-11-14T19:54:11,709 WARN [Thread-1189 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T19:54:11,711 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe561bffc6d8362c0 with lease ID 0x6fe81fce59c64c91: Processing first storage report for DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2 from datanode DatanodeRegistration(127.0.0.1:46855, datanodeUuid=218bf901-ab76-4697-a0a6-da051c6f042b, infoPort=42483, infoSecurePort=0, ipcPort=45529, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215) 2024-11-14T19:54:11,711 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe561bffc6d8362c0 with lease ID 0x6fe81fce59c64c91: from storage DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2 node DatanodeRegistration(127.0.0.1:46855, datanodeUuid=218bf901-ab76-4697-a0a6-da051c6f042b, infoPort=42483, infoSecurePort=0, ipcPort=45529, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:11,711 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe561bffc6d8362c0 with lease ID 0x6fe81fce59c64c91: Processing first storage report for DS-a3311aa5-8dd7-4193-af9e-df54a217e897 from datanode DatanodeRegistration(127.0.0.1:46855, datanodeUuid=218bf901-ab76-4697-a0a6-da051c6f042b, infoPort=42483, infoSecurePort=0, ipcPort=45529, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215) 2024-11-14T19:54:11,711 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe561bffc6d8362c0 with lease ID 0x6fe81fce59c64c91: from storage DS-a3311aa5-8dd7-4193-af9e-df54a217e897 node DatanodeRegistration(127.0.0.1:46855, datanodeUuid=218bf901-ab76-4697-a0a6-da051c6f042b, infoPort=42483, infoSecurePort=0, ipcPort=45529, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:11,770 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1 2024-11-14T19:54:11,773 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/zookeeper_0, clientPort=55467, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T19:54:11,774 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55467 2024-11-14T19:54:11,774 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:11,776 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:11,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:54:11,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:54:11,785 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65 with version=8 2024-11-14T19:54:11,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/hbase-staging 2024-11-14T19:54:11,787 INFO [Time-limited test {}] client.ConnectionUtils(128): master/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:54:11,787 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:54:11,787 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:54:11,787 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:54:11,787 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:54:11,787 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:54:11,787 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T19:54:11,787 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:54:11,788 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39903 2024-11-14T19:54:11,789 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39903 connecting to ZooKeeper ensemble=127.0.0.1:55467 2024-11-14T19:54:11,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:399030x0, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:54:11,825 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39903-0x1013c1753650000 connected 2024-11-14T19:54:11,885 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:11,888 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:11,891 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:54:11,892 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65, hbase.cluster.distributed=false 2024-11-14T19:54:11,896 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:54:11,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39903 2024-11-14T19:54:11,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39903 2024-11-14T19:54:11,898 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39903 2024-11-14T19:54:11,898 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39903 2024-11-14T19:54:11,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39903 2024-11-14T19:54:11,916 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:54:11,916 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:54:11,916 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:54:11,916 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:54:11,916 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:54:11,917 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:54:11,917 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T19:54:11,917 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:54:11,917 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34507 2024-11-14T19:54:11,919 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34507 connecting to ZooKeeper ensemble=127.0.0.1:55467 2024-11-14T19:54:11,919 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:11,921 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:11,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:345070x0, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:54:11,931 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:345070x0, quorum=127.0.0.1:55467, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:54:11,931 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34507-0x1013c1753650001 connected 2024-11-14T19:54:11,931 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T19:54:11,932 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T19:54:11,932 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T19:54:11,933 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:54:11,934 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34507 2024-11-14T19:54:11,934 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34507 2024-11-14T19:54:11,934 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34507 2024-11-14T19:54:11,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34507 2024-11-14T19:54:11,937 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34507 2024-11-14T19:54:11,948 
DEBUG [M:0;867b237d0fa7:39903 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;867b237d0fa7:39903 2024-11-14T19:54:11,950 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/867b237d0fa7,39903,1731614051787 2024-11-14T19:54:11,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:54:11,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:54:11,959 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/867b237d0fa7,39903,1731614051787 2024-11-14T19:54:11,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T19:54:11,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:11,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:11,968 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T19:54:11,968 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/867b237d0fa7,39903,1731614051787 from backup master directory 2024-11-14T19:54:11,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/867b237d0fa7,39903,1731614051787 2024-11-14T19:54:11,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:54:11,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:54:11,976 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
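The ZKUtil and ZKWatcher entries above show the master and region server registering watchers on znodes such as /hbase/running and /hbase/master before those znodes exist, then reacting to the NodeCreated/NodeDeleted events that follow. Below is a minimal sketch of that watch-before-create pattern using the plain Apache ZooKeeper client rather than HBase's ZKWatcher; the ensemble address 127.0.0.1:55467 is taken from the log, while the class name, the timeouts, and the choice of /hbase/running as the watched path are purely illustrative.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
        public static void main(String[] args) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            // Connect to the mini-cluster ensemble; this session watcher only tracks connection state.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:55467", 30000, event -> {
                if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            connected.await();

            String path = "/hbase/running"; // illustrative znode taken from the log above
            // exists() registers a one-shot watch even when the znode is absent, which is the
            // "Set watcher on znode that does not yet exist" behaviour recorded in the log.
            zk.exists(path, (WatchedEvent event) -> {
                // Fires once on NodeCreated/NodeDeleted/NodeDataChanged for this path.
                System.out.println("Event " + event.getType() + " on " + event.getPath());
            });

            Thread.sleep(60_000); // keep the session alive long enough to observe an event
            zk.close();
        }
    }

The call to exists() both answers whether the znode is currently there and leaves a watch behind, so a later create of /hbase/running is delivered as the NodeCreated event that the subsequent log entries react to.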
2024-11-14T19:54:11,976 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=867b237d0fa7,39903,1731614051787 2024-11-14T19:54:11,982 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/hbase.id] with ID: ab9d7f2c-7140-4757-9ef2-8c39e2681bdb 2024-11-14T19:54:11,982 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/.tmp/hbase.id 2024-11-14T19:54:11,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:54:11,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:54:11,991 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/.tmp/hbase.id]:[hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/hbase.id] 2024-11-14T19:54:12,008 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:12,009 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T19:54:12,011 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
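The util.FSUtils entries above record the cluster ID being written to a .tmp/hbase.id location first and only then moved to hbase.id, so a crash mid-write can never leave a truncated ID file at the final path. The following is a minimal sketch of that write-then-rename idiom against the public Hadoop FileSystem API, not HBase's own FSUtils; the hdfs://localhost:32953 NameNode address comes from the log, while the paths under /user/jenkins/test-data/example and the generated ID value are made up for the example.

    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:32953"); // NameNode address from the log
            FileSystem fs = FileSystem.get(conf);

            Path target = new Path("/user/jenkins/test-data/example/hbase.id");      // illustrative
            Path tmp = new Path("/user/jenkins/test-data/example/.tmp/hbase.id");    // illustrative

            // 1. Write the ID to the temporary location first.
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
            }
            // 2. Move it into place; readers see either no file or a complete file,
            //    never a partially written one.
            if (!fs.rename(tmp, target)) {
                throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
            }
            System.out.println("cluster id file at " + target);
            fs.close();
        }
    }

The publish step is a single rename on the NameNode, which is what makes the two-phase write safe even while other processes are polling for the final file.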
2024-11-14T19:54:12,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:12,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:12,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:54:12,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:54:12,025 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T19:54:12,026 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T19:54:12,026 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:54:12,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:54:12,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:54:12,035 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store 2024-11-14T19:54:12,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:54:12,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:54:12,042 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:54:12,042 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:54:12,042 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:54:12,043 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:54:12,043 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:54:12,043 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:54:12,043 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
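The region.MasterRegion entry above dumps the full master:store table descriptor, including the per-family attributes for info, proc, rs and state. The sketch below shows how a descriptor with the same 'info' attributes seen in the log (3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom filter, 8 KB blocks) could be assembled through the public HBase client builders; it is an illustration of the descriptor model only, not the code path MasterRegion itself uses, and the class name is invented for the example.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
        public static void main(String[] args) {
            // Mirrors the 'info' family attributes printed in the log above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build();

            // The other families in the log (proc, rs, state) keep defaults: 1 version, 64 KB blocks.
            ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.of("proc");
            ColumnFamilyDescriptor rs = ColumnFamilyDescriptorBuilder.of("rs");
            ColumnFamilyDescriptor state = ColumnFamilyDescriptorBuilder.of("state");

            TableDescriptor store = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("master:store"))
                .setColumnFamily(info)
                .setColumnFamily(proc)
                .setColumnFamily(rs)
                .setColumnFamily(state)
                .build();

            System.out.println(store);
        }
    }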
2024-11-14T19:54:12,043 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731614052042Disabling compacts and flushes for region at 1731614052042Disabling writes for close at 1731614052043 (+1 ms)Writing region close event to WAL at 1731614052043Closed at 1731614052043 2024-11-14T19:54:12,043 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/.initializing 2024-11-14T19:54:12,044 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/WALs/867b237d0fa7,39903,1731614051787 2024-11-14T19:54:12,046 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C39903%2C1731614051787, suffix=, logDir=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/WALs/867b237d0fa7,39903,1731614051787, archiveDir=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/oldWALs, maxLogs=10 2024-11-14T19:54:12,046 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C39903%2C1731614051787.1731614052046 2024-11-14T19:54:12,051 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/WALs/867b237d0fa7,39903,1731614051787/867b237d0fa7%2C39903%2C1731614051787.1731614052046 2024-11-14T19:54:12,052 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42483:42483),(127.0.0.1/127.0.0.1:39001:39001)] 2024-11-14T19:54:12,052 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:54:12,053 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:54:12,053 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:12,053 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:12,054 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:12,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T19:54:12,055 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:12,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:12,057 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T19:54:12,057 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,057 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:54:12,057 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:12,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T19:54:12,058 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:54:12,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:12,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T19:54:12,060 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:54:12,061 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:12,061 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:12,061 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:12,063 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:12,063 DEBUG [master/867b237d0fa7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:12,063 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T19:54:12,065 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:12,068 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:54:12,069 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=714007, jitterRate=-0.09209400415420532}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T19:54:12,070 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731614052053Initializing all the Stores at 1731614052054 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614052054Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614052054Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614052054Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614052054Cleaning up temporary data from old regions at 1731614052063 (+9 ms)Region opened successfully at 1731614052070 (+7 ms) 2024-11-14T19:54:12,070 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T19:54:12,075 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51bb4e0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:54:12,076 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T19:54:12,076 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T19:54:12,076 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T19:54:12,077 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T19:54:12,077 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T19:54:12,078 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T19:54:12,078 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T19:54:12,080 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T19:54:12,081 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T19:54:12,089 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T19:54:12,089 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T19:54:12,090 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T19:54:12,097 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T19:54:12,098 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T19:54:12,099 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T19:54:12,109 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T19:54:12,110 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T19:54:12,117 DEBUG 
[master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T19:54:12,120 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T19:54:12,125 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T19:54:12,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:54:12,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:54:12,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:12,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:12,134 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=867b237d0fa7,39903,1731614051787, sessionid=0x1013c1753650000, setting cluster-up flag (Was=false) 2024-11-14T19:54:12,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:12,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:12,176 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T19:54:12,177 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,39903,1731614051787 2024-11-14T19:54:12,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:12,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:12,217 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T19:54:12,219 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,39903,1731614051787 2024-11-14T19:54:12,220 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T19:54:12,222 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T19:54:12,223 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T19:54:12,223 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T19:54:12,223 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 867b237d0fa7,39903,1731614051787 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T19:54:12,225 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:54:12,225 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:54:12,225 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:54:12,225 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:54:12,226 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/867b237d0fa7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T19:54:12,226 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:12,226 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:54:12,226 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T19:54:12,227 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731614082227 2024-11-14T19:54:12,227 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T19:54:12,227 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T19:54:12,227 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T19:54:12,227 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T19:54:12,227 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T19:54:12,227 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T19:54:12,227 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,228 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:54:12,228 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T19:54:12,228 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T19:54:12,228 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T19:54:12,228 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T19:54:12,228 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T19:54:12,228 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T19:54:12,229 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614052228,5,FailOnTimeoutGroup] 2024-11-14T19:54:12,229 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614052229,5,FailOnTimeoutGroup] 2024-11-14T19:54:12,229 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,229 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T19:54:12,229 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,229 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,229 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,229 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T19:54:12,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:54:12,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:54:12,238 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T19:54:12,239 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65 2024-11-14T19:54:12,239 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(746): ClusterId : ab9d7f2c-7140-4757-9ef2-8c39e2681bdb 2024-11-14T19:54:12,239 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T19:54:12,249 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T19:54:12,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:54:12,249 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T19:54:12,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:54:12,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:54:12,251 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:54:12,252 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:54:12,252 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:12,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:54:12,254 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:54:12,254 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,254 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:12,254 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:54:12,255 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:54:12,256 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:12,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:54:12,257 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:54:12,257 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:12,258 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:54:12,258 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740 2024-11-14T19:54:12,259 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740 2024-11-14T19:54:12,260 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T19:54:12,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:54:12,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:54:12,260 DEBUG [RS:0;867b237d0fa7:34507 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@174170a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:54:12,261 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
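[Editor's note, illustrative sketch] The two FlushLargeStoresPolicy messages in this log (32.0 M for the master local store, 16.0 M just above for hbase:meta) both follow the fallback rule the message itself states: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set on the table, the per-family lower bound is the region's memstore flush size divided by the number of column families. The flush sizes (128 MB for master:store, 64 MB implied for hbase:meta by 16 MB x 4 families) and the family counts are read from the surrounding log; the class and method names below are made up for illustration and are not HBase code.

// Illustrative only: reproduces the fallback described by the log message
// "using region.getMemStoreFlushHeapSize/# of families", not HBase's implementation.
public class FlushLowerBoundSketch {
    static long perFamilyLowerBound(long memstoreFlushSize, int numFamilies) {
        return memstoreFlushSize / numFamilies;
    }

    public static void main(String[] args) {
        long mb = 1024L * 1024L;
        // master:store region: 128 MB flush size, 4 families (info, proc, rs, state)
        System.out.println(perFamilyLowerBound(128 * mb, 4) / mb + " MB"); // 32 MB
        // hbase:meta region: 64 MB flush size, 4 families (info, ns, rep_barrier, table)
        System.out.println(perFamilyLowerBound(64 * mb, 4) / mb + " MB");  // 16 MB
    }
}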
2024-11-14T19:54:12,262 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:54:12,264 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:54:12,265 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=867086, jitterRate=0.10255710780620575}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:54:12,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731614052250Initializing all the Stores at 1731614052250Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614052250Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614052251 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614052251Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614052251Cleaning up temporary data from old regions at 1731614052260 (+9 ms)Region opened successfully at 1731614052266 (+6 ms) 2024-11-14T19:54:12,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:54:12,266 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:54:12,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:54:12,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T19:54:12,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T19:54:12,267 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:54:12,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731614052266Disabling compacts and flushes for region at 1731614052266Disabling writes for close at 1731614052266Writing region close 
event to WAL at 1731614052267 (+1 ms)Closed at 1731614052267 2024-11-14T19:54:12,268 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:54:12,268 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T19:54:12,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T19:54:12,270 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:54:12,271 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T19:54:12,276 DEBUG [RS:0;867b237d0fa7:34507 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;867b237d0fa7:34507 2024-11-14T19:54:12,276 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T19:54:12,276 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T19:54:12,276 DEBUG [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T19:54:12,277 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(2659): reportForDuty to master=867b237d0fa7,39903,1731614051787 with port=34507, startcode=1731614051916 2024-11-14T19:54:12,278 DEBUG [RS:0;867b237d0fa7:34507 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T19:54:12,280 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41699, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T19:54:12,280 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39903 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 867b237d0fa7,34507,1731614051916 2024-11-14T19:54:12,280 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39903 {}] master.ServerManager(517): Registering regionserver=867b237d0fa7,34507,1731614051916 2024-11-14T19:54:12,282 DEBUG [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65 2024-11-14T19:54:12,282 DEBUG [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32953 2024-11-14T19:54:12,282 DEBUG [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T19:54:12,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:54:12,293 DEBUG [RS:0;867b237d0fa7:34507 {}] 
zookeeper.ZKUtil(111): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/867b237d0fa7,34507,1731614051916 2024-11-14T19:54:12,293 WARN [RS:0;867b237d0fa7:34507 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T19:54:12,293 INFO [RS:0;867b237d0fa7:34507 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:54:12,293 DEBUG [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916 2024-11-14T19:54:12,293 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [867b237d0fa7,34507,1731614051916] 2024-11-14T19:54:12,297 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T19:54:12,299 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T19:54:12,299 INFO [RS:0;867b237d0fa7:34507 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T19:54:12,299 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,299 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T19:54:12,300 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T19:54:12,300 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
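[Editor's note, illustrative sketch] The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is consistent with the low-water mark being 95% of the global limit, which matches the commonly documented default of hbase.regionserver.global.memstore.size.lower.limit = 0.95. The property name and the 0.95 factor are an assumption inferred from the numbers, not something printed by this log. A quick check of the arithmetic:

// Sketch only: verifies 836 M is 0.95 * 880 M as implied by the MemStoreFlusher line.
public class MemStoreLowMarkSketch {
    public static void main(String[] args) {
        double globalLimitMb = 880.0;
        double lowerLimitFraction = 0.95; // assumed default for
                                          // hbase.regionserver.global.memstore.size.lower.limit
        System.out.println(globalLimitMb * lowerLimitFraction + " MB"); // 836.0 MB
    }
}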
2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:54:12,301 DEBUG [RS:0;867b237d0fa7:34507 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:54:12,302 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,302 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,302 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,302 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
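[Editor's note, illustrative sketch] The executor.ExecutorService lines above each start a named pool with equal core and max sizes (for example RS_OPEN_REGION at 1/1, RS_LOG_REPLAY_OPS at 2/2, RS_SNAPSHOT_OPERATIONS and RS_FLUSH_OPERATIONS at 3/3). As a rough analogue, and not HBase's internal executor.ExecutorService class, a fixed-size java.util.concurrent pool per event type would look like the following; only the pool names and sizes are taken from the log, everything else is illustrative.

import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Illustrative analogue of the per-event-type pools logged above; it only mirrors
// the sizing printed in the log, not HBase's actual executor implementation.
public class RegionServerPoolsSketch {
    public static void main(String[] args) {
        Map<String, Integer> poolSizes = Map.of(
                "RS_OPEN_REGION", 1,
                "RS_CLOSE_REGION", 1,
                "RS_LOG_REPLAY_OPS", 2,
                "RS_SNAPSHOT_OPERATIONS", 3,
                "RS_FLUSH_OPERATIONS", 3);
        poolSizes.forEach((name, size) -> {
            ExecutorService pool = Executors.newFixedThreadPool(size);
            System.out.println("started " + name + " with " + size + " thread(s)");
            pool.shutdown();
        });
    }
}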
2024-11-14T19:54:12,302 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,302 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,34507,1731614051916-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:54:12,316 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T19:54:12,316 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,34507,1731614051916-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,316 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,316 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.Replication(171): 867b237d0fa7,34507,1731614051916 started 2024-11-14T19:54:12,329 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,329 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(1482): Serving as 867b237d0fa7,34507,1731614051916, RpcServer on 867b237d0fa7/172.17.0.2:34507, sessionid=0x1013c1753650001 2024-11-14T19:54:12,329 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T19:54:12,329 DEBUG [RS:0;867b237d0fa7:34507 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 867b237d0fa7,34507,1731614051916 2024-11-14T19:54:12,330 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,34507,1731614051916' 2024-11-14T19:54:12,330 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T19:54:12,330 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T19:54:12,331 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T19:54:12,331 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T19:54:12,331 DEBUG [RS:0;867b237d0fa7:34507 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 867b237d0fa7,34507,1731614051916 2024-11-14T19:54:12,331 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,34507,1731614051916' 2024-11-14T19:54:12,331 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T19:54:12,331 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T19:54:12,331 DEBUG [RS:0;867b237d0fa7:34507 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T19:54:12,331 INFO [RS:0;867b237d0fa7:34507 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T19:54:12,331 INFO [RS:0;867b237d0fa7:34507 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-14T19:54:12,422 WARN [867b237d0fa7:39903 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T19:54:12,436 INFO [RS:0;867b237d0fa7:34507 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C34507%2C1731614051916, suffix=, logDir=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916, archiveDir=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/oldWALs, maxLogs=32 2024-11-14T19:54:12,437 INFO [RS:0;867b237d0fa7:34507 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C34507%2C1731614051916.1731614052437 2024-11-14T19:54:12,446 INFO [RS:0;867b237d0fa7:34507 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 2024-11-14T19:54:12,447 DEBUG [RS:0;867b237d0fa7:34507 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39001:39001),(127.0.0.1/127.0.0.1:42483:42483)] 2024-11-14T19:54:12,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:54:12,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:54:12,672 DEBUG [867b237d0fa7:39903 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T19:54:12,673 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=867b237d0fa7,34507,1731614051916 2024-11-14T19:54:12,675 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,34507,1731614051916, state=OPENING 2024-11-14T19:54:12,684 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T19:54:12,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:12,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:12,694 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:54:12,694 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:54:12,694 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=867b237d0fa7,34507,1731614051916}] 2024-11-14T19:54:12,694 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:54:12,851 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T19:54:12,857 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47505, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T19:54:12,862 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T19:54:12,862 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:54:12,865 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C34507%2C1731614051916.meta, suffix=.meta, logDir=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916, archiveDir=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/oldWALs, maxLogs=32 2024-11-14T19:54:12,865 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C34507%2C1731614051916.meta.1731614052865.meta 2024-11-14T19:54:12,873 INFO 
[RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.meta.1731614052865.meta 2024-11-14T19:54:12,879 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42483:42483),(127.0.0.1/127.0.0.1:39001:39001)] 2024-11-14T19:54:12,879 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:54:12,880 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T19:54:12,880 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T19:54:12,880 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T19:54:12,880 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T19:54:12,880 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:54:12,880 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T19:54:12,880 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T19:54:12,882 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:54:12,883 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:54:12,883 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,883 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:12,883 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:54:12,884 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:54:12,884 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,885 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:12,885 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:54:12,885 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:54:12,885 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,886 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:12,886 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:54:12,887 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:54:12,887 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:12,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:12,887 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:54:12,888 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740 2024-11-14T19:54:12,890 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740 2024-11-14T19:54:12,891 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:54:12,891 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:54:12,892 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
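Annotation: the FlushLargeStoresPolicy line above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set for hbase:meta, so the policy falls back to the region memstore flush size divided by the number of families (reported as 16.0 M per family). As a minimal, hedged sketch of setting that lower bound explicitly in a test Configuration (class name illustrative; assumes hbase-common and hadoop-common on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key quoted verbatim in the log line above; when absent, FlushLargeStoresPolicy
    // derives a per-family lower bound from flush-size / number-of-families instead.
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
    System.out.println(conf.getLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", -1));
  }
}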
2024-11-14T19:54:12,893 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:54:12,894 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747032, jitterRate=-0.05009999871253967}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:54:12,894 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T19:54:12,895 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731614052880Writing region info on filesystem at 1731614052880Initializing all the Stores at 1731614052881 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614052881Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614052882 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614052882Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614052882Cleaning up temporary data from old regions at 1731614052891 (+9 ms)Running coprocessor post-open hooks at 1731614052894 (+3 ms)Region opened successfully at 1731614052895 (+1 ms) 2024-11-14T19:54:12,896 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731614052850 2024-11-14T19:54:12,899 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T19:54:12,900 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T19:54:12,901 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=867b237d0fa7,34507,1731614051916 2024-11-14T19:54:12,902 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,34507,1731614051916, state=OPEN 2024-11-14T19:54:12,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:54:12,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:54:12,925 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=867b237d0fa7,34507,1731614051916 2024-11-14T19:54:12,925 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:54:12,925 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:54:12,929 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T19:54:12,929 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=867b237d0fa7,34507,1731614051916 in 231 msec 2024-11-14T19:54:12,931 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T19:54:12,931 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 661 msec 2024-11-14T19:54:12,932 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:54:12,932 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T19:54:12,933 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:54:12,933 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,34507,1731614051916, seqNum=-1] 2024-11-14T19:54:12,934 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:54:12,935 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48783, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:54:12,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 717 msec 2024-11-14T19:54:12,940 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731614052940, completionTime=-1 2024-11-14T19:54:12,940 INFO 
[master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T19:54:12,941 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T19:54:12,942 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T19:54:12,942 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731614112942 2024-11-14T19:54:12,942 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731614172942 2024-11-14T19:54:12,942 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-14T19:54:12,943 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39903,1731614051787-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,943 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39903,1731614051787-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,943 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39903,1731614051787-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,943 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-867b237d0fa7:39903, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,943 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,943 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:12,945 DEBUG [master/867b237d0fa7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T19:54:12,947 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.970sec 2024-11-14T19:54:12,947 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T19:54:12,947 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T19:54:12,947 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T19:54:12,947 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
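Annotation: the ChoreService lines above show the master enabling periodic chores (ClusterStatusChore, BalancerChore, RegionNormalizerChore, and so on) with fixed periods. The sketch below only illustrates the shape of that periodic-task pattern using the plain JDK scheduler, not HBase's ChoreService API; names and the one-second sleep are illustrative.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChorePatternSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    // Comparable to a chore with period=300000 ms: a named task fired at a fixed rate.
    scheduler.scheduleAtFixedRate(
        () -> System.out.println("BalancerChore-like task fired"),
        0, 300_000, TimeUnit.MILLISECONDS);
    TimeUnit.SECONDS.sleep(1);   // let the first run fire, then shut down
    scheduler.shutdownNow();
  }
}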
2024-11-14T19:54:12,947 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T19:54:12,947 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39903,1731614051787-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:54:12,947 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39903,1731614051787-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T19:54:12,950 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T19:54:12,950 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T19:54:12,950 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39903,1731614051787-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:13,040 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@714cae8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:54:13,040 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 867b237d0fa7,39903,-1 for getting cluster id 2024-11-14T19:54:13,041 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T19:54:13,043 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ab9d7f2c-7140-4757-9ef2-8c39e2681bdb' 2024-11-14T19:54:13,044 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T19:54:13,044 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ab9d7f2c-7140-4757-9ef2-8c39e2681bdb" 2024-11-14T19:54:13,044 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b998570, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:54:13,044 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [867b237d0fa7,39903,-1] 2024-11-14T19:54:13,044 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T19:54:13,045 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:54:13,048 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51190, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T19:54:13,049 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c3d6ef7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:54:13,050 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:54:13,051 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,34507,1731614051916, seqNum=-1] 2024-11-14T19:54:13,052 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:54:13,054 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60934, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:54:13,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=867b237d0fa7,39903,1731614051787 2024-11-14T19:54:13,057 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:13,062 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T19:54:13,062 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-14T19:54:13,062 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-14T19:54:13,062 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T19:54:13,064 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 867b237d0fa7,39903,1731614051787 2024-11-14T19:54:13,064 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3f997047 2024-11-14T19:54:13,064 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T19:54:13,066 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51192, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T19:54:13,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39903 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T19:54:13,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39903 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
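Annotation: the two TableDescriptorChecker warnings above flag that the table about to be created uses a deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) so flushes and log rolls happen quickly during the test. A hedged sketch of creating a one-family table with such descriptor-level overrides through the public client API follows; it assumes an open Connection to a running (mini) cluster and is not the test's own code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushTableSketch {
  // Builds a table descriptor with the small thresholds the warnings refer to.
  static void createSmallFlushTable(Connection conn) throws Exception {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
        .setMaxFileSize(786432)          // MAX_FILESIZE from the first warning
        .setMemStoreFlushSize(8192)      // MEMSTORE_FLUSHSIZE from the second warning
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .build();
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(td);
    }
  }
}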
2024-11-14T19:54:13,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39903 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T19:54:13,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39903 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T19:54:13,070 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T19:54:13,070 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:13,071 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39903 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-14T19:54:13,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39903 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T19:54:13,072 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T19:54:13,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741835_1011 (size=395) 2024-11-14T19:54:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741835_1011 (size=395) 2024-11-14T19:54:13,082 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bd52de533b3b9366f0c10bb5defbe498, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65 2024-11-14T19:54:13,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46855 is added to blk_1073741836_1012 (size=78) 2024-11-14T19:54:13,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741836_1012 (size=78) 2024-11-14T19:54:13,091 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:54:13,091 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing bd52de533b3b9366f0c10bb5defbe498, disabling compactions & flushes 2024-11-14T19:54:13,091 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 2024-11-14T19:54:13,091 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 2024-11-14T19:54:13,091 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. after waiting 0 ms 2024-11-14T19:54:13,091 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 2024-11-14T19:54:13,091 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 2024-11-14T19:54:13,091 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for bd52de533b3b9366f0c10bb5defbe498: Waiting for close lock at 1731614053091Disabling compacts and flushes for region at 1731614053091Disabling writes for close at 1731614053091Writing region close event to WAL at 1731614053091Closed at 1731614053091 2024-11-14T19:54:13,093 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T19:54:13,094 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731614053094"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731614053094"}]},"ts":"1731614053094"} 2024-11-14T19:54:13,097 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
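Annotation: the MetaTableAccessor Put above records the new region's info:regioninfo and info:state cells under the region row key in hbase:meta. A client-side sketch of reading that state cell back (assumes a running cluster and reuses the row key shown in the Put; not part of the test):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaStateReadSketch {
  public static void main(String[] args) throws Exception {
    String regionRow =
        "TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498.";
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Result row = meta.get(new Get(Bytes.toBytes(regionRow)));
      byte[] state = row.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
      System.out.println("region state cell: " + (state == null ? "<absent>" : Bytes.toString(state)));
    }
  }
}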
2024-11-14T19:54:13,099 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T19:54:13,099 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731614053099"}]},"ts":"1731614053099"} 2024-11-14T19:54:13,101 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-14T19:54:13,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=bd52de533b3b9366f0c10bb5defbe498, ASSIGN}] 2024-11-14T19:54:13,103 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=bd52de533b3b9366f0c10bb5defbe498, ASSIGN 2024-11-14T19:54:13,104 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=bd52de533b3b9366f0c10bb5defbe498, ASSIGN; state=OFFLINE, location=867b237d0fa7,34507,1731614051916; forceNewPlan=false, retain=false 2024-11-14T19:54:13,255 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bd52de533b3b9366f0c10bb5defbe498, regionState=OPENING, regionLocation=867b237d0fa7,34507,1731614051916 2024-11-14T19:54:13,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=bd52de533b3b9366f0c10bb5defbe498, ASSIGN because future has completed 2024-11-14T19:54:13,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bd52de533b3b9366f0c10bb5defbe498, server=867b237d0fa7,34507,1731614051916}] 2024-11-14T19:54:13,425 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 
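Annotation: once the ASSIGN above completes, the region is served by 867b237d0fa7,34507,1731614051916. A short sketch of confirming region placement from the client side with RegionLocator (assumes an open Connection; not the test's own code):

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  static void printLocations(Connection conn) throws Exception {
    try (RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Prints the encoded region name and the hosting region server, e.g. 867b237d0fa7,34507,...
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}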
2024-11-14T19:54:13,426 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bd52de533b3b9366f0c10bb5defbe498, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498.', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:54:13,426 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:13,427 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:54:13,427 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:13,427 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:13,429 INFO [StoreOpener-bd52de533b3b9366f0c10bb5defbe498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:13,432 INFO [StoreOpener-bd52de533b3b9366f0c10bb5defbe498-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bd52de533b3b9366f0c10bb5defbe498 columnFamilyName info 2024-11-14T19:54:13,432 DEBUG [StoreOpener-bd52de533b3b9366f0c10bb5defbe498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:13,433 INFO [StoreOpener-bd52de533b3b9366f0c10bb5defbe498-1 {}] regionserver.HStore(327): Store=bd52de533b3b9366f0c10bb5defbe498/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:54:13,433 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:13,434 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/default/TestLogRolling-testLogRollOnPipelineRestart/bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:13,435 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/default/TestLogRolling-testLogRollOnPipelineRestart/bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:13,436 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:13,436 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:13,438 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:13,440 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/default/TestLogRolling-testLogRollOnPipelineRestart/bd52de533b3b9366f0c10bb5defbe498/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:54:13,441 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bd52de533b3b9366f0c10bb5defbe498; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766429, jitterRate=-0.025435179471969604}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T19:54:13,441 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:13,441 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bd52de533b3b9366f0c10bb5defbe498: Running coprocessor pre-open hook at 1731614053427Writing region info on filesystem at 1731614053427Initializing all the Stores at 1731614053429 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614053429Cleaning up temporary data from old regions at 1731614053436 (+7 ms)Running coprocessor post-open hooks at 1731614053441 (+5 ms)Region opened successfully at 1731614053441 2024-11-14T19:54:13,442 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498., pid=6, masterSystemTime=1731614053415 2024-11-14T19:54:13,445 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 2024-11-14T19:54:13,445 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 2024-11-14T19:54:13,446 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bd52de533b3b9366f0c10bb5defbe498, regionState=OPEN, openSeqNum=2, regionLocation=867b237d0fa7,34507,1731614051916 2024-11-14T19:54:13,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bd52de533b3b9366f0c10bb5defbe498, server=867b237d0fa7,34507,1731614051916 because future has completed 2024-11-14T19:54:13,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T19:54:13,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bd52de533b3b9366f0c10bb5defbe498, server=867b237d0fa7,34507,1731614051916 in 189 msec 2024-11-14T19:54:13,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T19:54:13,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=bd52de533b3b9366f0c10bb5defbe498, ASSIGN in 350 msec 2024-11-14T19:54:13,456 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T19:54:13,456 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731614053456"}]},"ts":"1731614053456"} 2024-11-14T19:54:13,458 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-14T19:54:13,459 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T19:54:13,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 392 msec 2024-11-14T19:54:13,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:13,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:14,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:14,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:15,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:15,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:16,172 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T19:54:16,190 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:16,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:16,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:16,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:16,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:16,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:16,196 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:16,196 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:16,196 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:16,199 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:16,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:16,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:17,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:17,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:18,298 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T19:54:18,300 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-14T19:54:18,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:18,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:19,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:19,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:20,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:20,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:20,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T19:54:20,669 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T19:54:20,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T19:54:20,669 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-14T19:54:20,670 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:54:20,670 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T19:54:20,670 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T19:54:20,670 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T19:54:21,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:21,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:54:22,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:22,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:23,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39903 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T19:54:23,077 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-14T19:54:23,077 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-14T19:54:23,082 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T19:54:23,082 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 2024-11-14T19:54:23,088 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498., hostname=867b237d0fa7,34507,1731614051916, seqNum=2] 2024-11-14T19:54:23,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:23,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:24,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:24,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:25,092 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 2024-11-14T19:54:25,092 WARN [ResponseProcessor for block BP-63474148-172.17.0.2-1731614050215:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-63474148-172.17.0.2-1731614050215:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:25,092 WARN [ResponseProcessor for block BP-63474148-172.17.0.2-1731614050215:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-63474148-172.17.0.2-1731614050215:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
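The repeated util.RecoverLeaseFSUtils(258) "Failed invocation ... java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" entries above all have the same shape: isFileClosed is called reflectively, the underlying DFS client has already been shut down, and the resulting IOException surfaces wrapped in an InvocationTargetException whose own message is null (which Log4j prints as "null"). The following is a minimal, self-contained Java sketch of that wrapping behaviour only; FileSystemStub and its isFileClosed method are hypothetical stand-ins, not the HBase or HDFS implementation.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectionWrappingDemo {
    // Hypothetical stand-in for a filesystem whose client is already closed.
    public static class FileSystemStub {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        FileSystemStub fs = new FileSystemStub();
        Method m = FileSystemStub.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(fs, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // The wrapper itself carries no message, which a logger renders as "null".
            System.out.println("wrapper message: " + e.getMessage());
            // The real failure travels as the cause, as in the "Caused by:" lines above.
            System.out.println("cause: " + e.getCause());
        }
    }
}

The once-per-second timestamps on the warnings (19:54:15, 19:54:16, ... 19:54:24) show the Close-WAL-Writer thread retrying the same recovery probe for the same two WAL paths, each attempt failing the same way while the filesystem stays closed.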
2024-11-14T19:54:25,093 WARN [DataStreamer for file /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/WALs/867b237d0fa7,39903,1731614051787/867b237d0fa7%2C39903%2C1731614051787.1731614052046 block BP-63474148-172.17.0.2-1731614050215:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-63474148-172.17.0.2-1731614050215:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46855,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK], DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46855,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK]) is bad. 2024-11-14T19:54:25,093 WARN [DataStreamer for file /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.meta.1731614052865.meta block BP-63474148-172.17.0.2-1731614050215:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-63474148-172.17.0.2-1731614050215:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46855,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK], DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46855,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK]) is bad. 2024-11-14T19:54:25,093 WARN [ResponseProcessor for block BP-63474148-172.17.0.2-1731614050215:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-63474148-172.17.0.2-1731614050215:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-63474148-172.17.0.2-1731614050215:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:46855,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:25,093 WARN [DataStreamer for file /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 block BP-63474148-172.17.0.2-1731614050215:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-63474148-172.17.0.2-1731614050215:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK], DatanodeInfoWithStorage[127.0.0.1:46855,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46855,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK]) is bad. 2024-11-14T19:54:25,093 WARN [PacketResponder: BP-63474148-172.17.0.2-1731614050215:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46855] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:54:25,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-479197093_22 at /127.0.0.1:58128 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46855:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58128 dst: /127.0.0.1:46855 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
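The DataXceiver "error processing WRITE_BLOCK" entries report java.nio.channels.ClosedChannelException thrown from AbstractSelectableChannel.register(...): once the underlying socket channel has been closed (here, because the writer's side of the pipeline went away), any further attempt to register it with a selector fails this way, and the adjacent PacketResponder "Connection reset by peer" warning is the write-side view of the same dropped connection. Below is a small sketch of that NIO behaviour in isolation, independent of Hadoop; it only demonstrates the JDK semantics, not the datanode code.

import java.nio.channels.ClosedChannelException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;

public class ClosedChannelDemo {
    public static void main(String[] args) throws Exception {
        try (Selector selector = Selector.open()) {
            SocketChannel channel = SocketChannel.open();
            channel.configureBlocking(false);
            channel.close(); // simulate the connection going away before the next selector operation

            try {
                // Registering a closed channel fails exactly like the top frame of the traces above.
                channel.register(selector, SelectionKey.OP_READ);
            } catch (ClosedChannelException e) {
                System.out.println("register on closed channel: " + e);
            }
        }
    }
}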
2024-11-14T19:54:25,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1308489250_22 at /127.0.0.1:58174 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46855:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58174 dst: /127.0.0.1:46855 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:54:25,095 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-479197093_22 at /127.0.0.1:36834 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36834 dst: /127.0.0.1:34575 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:54:25,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1308489250_22 at /127.0.0.1:58180 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46855:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58180 dst: /127.0.0.1:46855 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:54:25,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1308489250_22 at /127.0.0.1:36862 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36862 dst: /127.0.0.1:34575 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:54:25,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1308489250_22 at /127.0.0.1:36854 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36854 dst: /127.0.0.1:34575 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:54:25,131 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a305799{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:25,132 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d6e24b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:54:25,132 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:54:25,132 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69151493{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:54:25,132 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77037bf8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,STOPPED} 2024-11-14T19:54:25,133 WARN [BP-63474148-172.17.0.2-1731614050215 heartbeating to localhost/127.0.0.1:32953 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:54:25,133 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T19:54:25,133 WARN [BP-63474148-172.17.0.2-1731614050215 heartbeating to localhost/127.0.0.1:32953 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-63474148-172.17.0.2-1731614050215 (Datanode Uuid 218bf901-ab76-4697-a0a6-da051c6f042b) service to localhost/127.0.0.1:32953 2024-11-14T19:54:25,133 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:54:25,134 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data3/current/BP-63474148-172.17.0.2-1731614050215 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:25,134 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data4/current/BP-63474148-172.17.0.2-1731614050215 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:25,135 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:54:25,144 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:54:25,148 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:54:25,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:54:25,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:54:25,154 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:54:25,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18fd8c49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:54:25,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@224d98b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:54:25,259 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@23abeef5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/java.io.tmpdir/jetty-localhost-45561-hadoop-hdfs-3_4_1-tests_jar-_-any-4887076013471045177/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:25,259 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@572e6439{HTTP/1.1, (http/1.1)}{localhost:45561} 2024-11-14T19:54:25,259 INFO [Time-limited test {}] server.Server(415): Started @177305ms 2024-11-14T19:54:25,260 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:54:25,287 WARN [ResponseProcessor for block BP-63474148-172.17.0.2-1731614050215:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-63474148-172.17.0.2-1731614050215:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:25,287 WARN [ResponseProcessor for block BP-63474148-172.17.0.2-1731614050215:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-63474148-172.17.0.2-1731614050215:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:25,287 WARN [ResponseProcessor for block BP-63474148-172.17.0.2-1731614050215:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-63474148-172.17.0.2-1731614050215:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:25,287 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1308489250_22 at /127.0.0.1:38314 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38314 dst: /127.0.0.1:34575 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:54:25,288 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1308489250_22 at /127.0.0.1:38306 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38306 dst: /127.0.0.1:34575 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:54:25,288 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-479197093_22 at /127.0.0.1:38310 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38310 dst: /127.0.0.1:34575 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:54:25,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5514adb5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:25,294 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@362b6215{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:54:25,294 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:54:25,294 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6cb794bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:54:25,294 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5267df65{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,STOPPED} 2024-11-14T19:54:25,295 WARN [BP-63474148-172.17.0.2-1731614050215 heartbeating to localhost/127.0.0.1:32953 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:54:25,295 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T19:54:25,295 WARN [BP-63474148-172.17.0.2-1731614050215 heartbeating to localhost/127.0.0.1:32953 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-63474148-172.17.0.2-1731614050215 (Datanode Uuid 9fab9f93-3f7c-4a9f-9efb-34f9d8de144f) service to localhost/127.0.0.1:32953 2024-11-14T19:54:25,295 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:54:25,296 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data1/current/BP-63474148-172.17.0.2-1731614050215 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:25,296 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data2/current/BP-63474148-172.17.0.2-1731614050215 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:25,296 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:54:25,305 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:54:25,309 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:54:25,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:54:25,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:54:25,319 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T19:54:25,322 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d6fed92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:54:25,323 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@155ac527{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:54:25,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c67455a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/java.io.tmpdir/jetty-localhost-39313-hadoop-hdfs-3_4_1-tests_jar-_-any-17039723363237393134/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:25,431 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fb188b1{HTTP/1.1, 
(http/1.1)}{localhost:39313} 2024-11-14T19:54:25,431 INFO [Time-limited test {}] server.Server(415): Started @177476ms 2024-11-14T19:54:25,432 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:54:25,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:25,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:25,583 WARN [Thread-1337 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T19:54:25,586 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x908c75ba81c7997 with lease ID 0x6fe81fce59c64c92: from storage DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2 node DatanodeRegistration(127.0.0.1:39311, datanodeUuid=218bf901-ab76-4697-a0a6-da051c6f042b, infoPort=33847, infoSecurePort=0, ipcPort=33957, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:25,586 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x908c75ba81c7997 with lease ID 0x6fe81fce59c64c92: from storage DS-a3311aa5-8dd7-4193-af9e-df54a217e897 node DatanodeRegistration(127.0.0.1:39311, datanodeUuid=218bf901-ab76-4697-a0a6-da051c6f042b, infoPort=33847, infoSecurePort=0, ipcPort=33957, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:25,771 WARN [Thread-1358 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T19:54:25,781 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa694e8d5d1dde6d6 with lease ID 0x6fe81fce59c64c93: from storage DS-b46607e9-a713-4575-9edb-111f698c75be node DatanodeRegistration(127.0.0.1:36465, datanodeUuid=9fab9f93-3f7c-4a9f-9efb-34f9d8de144f, infoPort=35605, infoSecurePort=0, ipcPort=35351, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:25,781 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa694e8d5d1dde6d6 with lease ID 0x6fe81fce59c64c93: from storage DS-49c94482-bbeb-4683-9b69-09a70914bbd1 node DatanodeRegistration(127.0.0.1:36465, datanodeUuid=9fab9f93-3f7c-4a9f-9efb-34f9d8de144f, infoPort=35605, infoSecurePort=0, ipcPort=35351, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:26,464 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-14T19:54:26,467 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-14T19:54:26,468 ERROR [FSHLog-0-hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65-prefix:867b237d0fa7,34507,1731614051916 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:26,468 WARN [FSHLog-0-hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65-prefix:867b237d0fa7,34507,1731614051916 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:54:26,469 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C34507%2C1731614051916:(num 1731614052437) roll requested 2024-11-14T19:54:26,469 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C34507%2C1731614051916.1731614066469 2024-11-14T19:54:26,478 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 newFile=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 2024-11-14T19:54:26,478 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:26,478 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:26,478 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:26,478 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:26,478 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:26,479 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 2024-11-14T19:54:26,479 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:26,479 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:54:26,479 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 2024-11-14T19:54:26,479 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33847:33847),(127.0.0.1/127.0.0.1:35605:35605)] 2024-11-14T19:54:26,479 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 is not closed yet, will try archiving it next time 2024-11-14T19:54:26,480 WARN [IPC Server handler 0 on default port 32953 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-11-14T19:54:26,480 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 after 1ms 2024-11-14T19:54:26,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:26,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:27,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:27,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:28,483 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-14T19:54:28,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:28,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:29,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:29,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:29,588 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T19:54:30,481 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 after 4002ms 2024-11-14T19:54:30,486 WARN [ResponseProcessor for block BP-63474148-172.17.0.2-1731614050215:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-63474148-172.17.0.2-1731614050215:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-63474148-172.17.0.2-1731614050215:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:36465,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:30,487 WARN [DataStreamer for file /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 block BP-63474148-172.17.0.2-1731614050215:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-63474148-172.17.0.2-1731614050215:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39311,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK], DatanodeInfoWithStorage[127.0.0.1:36465,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36465,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]) is bad. 2024-11-14T19:54:30,487 WARN [PacketResponder: BP-63474148-172.17.0.2-1731614050215:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36465] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] 
at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:54:30,487 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1308489250_22 at /127.0.0.1:43358 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43358 dst: /127.0.0.1:36465 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:54:30,487 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1308489250_22 at /127.0.0.1:34592 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39311:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34592 dst: /127.0.0.1:39311 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:54:30,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c67455a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:30,540 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fb188b1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:54:30,540 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:54:30,540 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@155ac527{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:54:30,540 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d6fed92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,STOPPED} 2024-11-14T19:54:30,542 WARN [BP-63474148-172.17.0.2-1731614050215 heartbeating to localhost/127.0.0.1:32953 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:54:30,542 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T19:54:30,542 WARN [BP-63474148-172.17.0.2-1731614050215 heartbeating to localhost/127.0.0.1:32953 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-63474148-172.17.0.2-1731614050215 (Datanode Uuid 9fab9f93-3f7c-4a9f-9efb-34f9d8de144f) service to localhost/127.0.0.1:32953 2024-11-14T19:54:30,542 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:54:30,542 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data1/current/BP-63474148-172.17.0.2-1731614050215 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:30,543 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data2/current/BP-63474148-172.17.0.2-1731614050215 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:30,543 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:54:30,552 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:54:30,556 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:54:30,557 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:54:30,557 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:54:30,557 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T19:54:30,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2efb5f06{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:54:30,558 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2122c1dc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:54:30,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:54:30,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:30,666 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e88a461{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/java.io.tmpdir/jetty-localhost-43269-hadoop-hdfs-3_4_1-tests_jar-_-any-12275989218032654551/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:30,666 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b7001f3{HTTP/1.1, (http/1.1)}{localhost:43269} 2024-11-14T19:54:30,666 INFO [Time-limited test {}] server.Server(415): Started @182712ms 2024-11-14T19:54:30,667 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T19:54:30,687 WARN [ResponseProcessor for block BP-63474148-172.17.0.2-1731614050215:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-63474148-172.17.0.2-1731614050215:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:30,688 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1308489250_22 at /127.0.0.1:34618 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39311:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34618 dst: /127.0.0.1:39311 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T19:54:30,691 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@23abeef5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:30,692 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@572e6439{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:54:30,692 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:54:30,692 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@224d98b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:54:30,692 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18fd8c49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,STOPPED} 2024-11-14T19:54:30,693 WARN [BP-63474148-172.17.0.2-1731614050215 heartbeating to localhost/127.0.0.1:32953 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:54:30,693 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T19:54:30,693 WARN [BP-63474148-172.17.0.2-1731614050215 heartbeating to localhost/127.0.0.1:32953 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-63474148-172.17.0.2-1731614050215 (Datanode Uuid 218bf901-ab76-4697-a0a6-da051c6f042b) service to localhost/127.0.0.1:32953 2024-11-14T19:54:30,693 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:54:30,693 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data3/current/BP-63474148-172.17.0.2-1731614050215 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:30,693 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data4/current/BP-63474148-172.17.0.2-1731614050215 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:30,694 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:54:30,707 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:54:30,711 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:54:30,714 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:54:30,714 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:54:30,714 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:54:30,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f5ff1a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:54:30,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f4f6bd2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:54:30,819 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@165c873c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/java.io.tmpdir/jetty-localhost-45199-hadoop-hdfs-3_4_1-tests_jar-_-any-15595795531225147939/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:30,820 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b6bf876{HTTP/1.1, (http/1.1)}{localhost:45199} 2024-11-14T19:54:30,820 INFO [Time-limited test {}] server.Server(415): Started @182866ms 2024-11-14T19:54:30,822 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:54:30,938 WARN [Thread-1411 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T19:54:30,941 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8fd43313eeacec47 with lease ID 0x6fe81fce59c64c94: from storage DS-b46607e9-a713-4575-9edb-111f698c75be node DatanodeRegistration(127.0.0.1:41215, datanodeUuid=9fab9f93-3f7c-4a9f-9efb-34f9d8de144f, infoPort=46569, infoSecurePort=0, ipcPort=36757, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:30,941 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8fd43313eeacec47 with lease ID 0x6fe81fce59c64c94: from storage DS-49c94482-bbeb-4683-9b69-09a70914bbd1 node DatanodeRegistration(127.0.0.1:41215, datanodeUuid=9fab9f93-3f7c-4a9f-9efb-34f9d8de144f, infoPort=46569, infoSecurePort=0, ipcPort=36757, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:31,122 WARN [Thread-1432 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T19:54:31,125 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbcaf67e237c26848 with lease ID 0x6fe81fce59c64c95: from storage DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2 node DatanodeRegistration(127.0.0.1:39249, datanodeUuid=218bf901-ab76-4697-a0a6-da051c6f042b, infoPort=34947, infoSecurePort=0, ipcPort=34169, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T19:54:31,126 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbcaf67e237c26848 with lease ID 0x6fe81fce59c64c95: from storage DS-a3311aa5-8dd7-4193-af9e-df54a217e897 node DatanodeRegistration(127.0.0.1:39249, datanodeUuid=218bf901-ab76-4697-a0a6-da051c6f042b, infoPort=34947, infoSecurePort=0, ipcPort=34169, storageInfo=lv=-57;cid=testClusterID;nsid=1357765449;c=1731614050215), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:31,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:31,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:54:31,846 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-14T19:54:31,849 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-14T19:54:31,851 ERROR [FSHLog-0-hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65-prefix:867b237d0fa7,34507,1731614051916 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39311,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:31,851 WARN [FSHLog-0-hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65-prefix:867b237d0fa7,34507,1731614051916 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39311,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:54:31,852 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C34507%2C1731614051916:(num 1731614066469) roll requested 2024-11-14T19:54:31,852 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C34507%2C1731614051916.1731614071852 2024-11-14T19:54:31,865 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 newFile=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614071852 2024-11-14T19:54:31,865 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:31,866 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:31,866 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:31,866 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:31,866 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:31,866 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614071852 2024-11-14T19:54:31,866 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39311,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:31,866 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39311,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:54:31,866 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 2024-11-14T19:54:31,867 WARN [IPC Server handler 1 on default port 32953 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-14T19:54:31,867 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34947:34947),(127.0.0.1/127.0.0.1:46569:46569)] 2024-11-14T19:54:31,867 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 is not closed yet, will try archiving it next time 2024-11-14T19:54:31,867 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 after 1ms 2024-11-14T19:54:32,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:32,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:33,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:33,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:33,868 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C34507%2C1731614051916.1731614073868 2024-11-14T19:54:33,874 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614071852 newFile=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 2024-11-14T19:54:33,874 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:33,874 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:33,874 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:33,874 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:33,874 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:33,874 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614071852 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 2024-11-14T19:54:33,883 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34947:34947),(127.0.0.1/127.0.0.1:46569:46569)] 2024-11-14T19:54:33,883 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 is not closed yet, will try archiving it next time 2024-11-14T19:54:33,883 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614071852 is not closed yet, will try archiving it next time 2024-11-14T19:54:33,884 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 2024-11-14T19:54:33,884 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 2024-11-14T19:54:33,884 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 after 0ms 2024-11-14T19:54:33,884 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 2024-11-14T19:54:33,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741838_1019 (size=1264) 2024-11-14T19:54:33,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741838_1019 (size=1264) 2024-11-14T19:54:33,885 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 is not closed yet, will try archiving it next time 2024-11-14T19:54:33,896 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731614053441/Put/vlen=218/seqid=0] 2024-11-14T19:54:33,897 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731614063090/Put/vlen=1045/seqid=0] 2024-11-14T19:54:33,897 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614052437 2024-11-14T19:54:33,897 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 2024-11-14T19:54:33,897 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 2024-11-14T19:54:33,898 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 after 1ms 2024-11-14T19:54:33,898 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 2024-11-14T19:54:33,904 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731614066468/Put/vlen=1045/seqid=0] 2024-11-14T19:54:33,904 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731614068484/Put/vlen=1045/seqid=0] 2024-11-14T19:54:33,904 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 2024-11-14T19:54:33,904 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614071852 2024-11-14T19:54:33,904 INFO [Time-limited test {}] 
util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614071852 2024-11-14T19:54:33,904 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614071852 after 0ms 2024-11-14T19:54:33,904 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614071852 2024-11-14T19:54:33,908 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731614071851/Put/vlen=1045/seqid=0] 2024-11-14T19:54:33,908 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 2024-11-14T19:54:33,908 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 2024-11-14T19:54:33,909 WARN [IPC Server handler 0 on default port 32953 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-14T19:54:33,909 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 after 1ms 2024-11-14T19:54:34,126 WARN [ResponseProcessor for block BP-63474148-172.17.0.2-1731614050215:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-63474148-172.17.0.2-1731614050215:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:34,126 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-479197093_22 at /127.0.0.1:47826 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39249:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47826 dst: /127.0.0.1:39249 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:39249 remote=/127.0.0.1:47826]. Total timeout mills is 60000, 59747 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T19:54:34,127 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-479197093_22 at /127.0.0.1:44322 [Receiving block BP-63474148-172.17.0.2-1731614050215:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44322 dst: /127.0.0.1:41215 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
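The RecoverLeaseFSUtils messages above show the usual recovery pattern: the first recoverLease call on a WAL that is still open fails with "Lease recovery is in progress", and a later attempt reports success once the NameNode finishes block recovery. Below is a minimal, hypothetical sketch of that polling loop written directly against the plain HDFS client API (not HBase's RecoverLeaseFSUtils itself); the NameNode address and WAL path are copied from the log, everything else is illustrative.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoveryProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address and WAL path copied from the log above.
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:32953"), conf);
    Path wal = new Path("/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/"
        + "WALs/867b237d0fa7,34507,1731614051916/"
        + "867b237d0fa7%2C34507%2C1731614051916.1731614073868");

    // recoverLease asks the NameNode to start lease recovery; it returns true
    // only if the file is already closed.
    boolean closed = dfs.recoverLease(wal);
    long deadline = System.currentTimeMillis() + 60_000L;
    while (!closed && System.currentTimeMillis() < deadline) {
      Thread.sleep(4_000L);            // the log shows roughly 4s between attempts
      closed = dfs.isFileClosed(wal);  // poll until block recovery completes
    }
    System.out.println("WAL closed: " + closed);
    dfs.close();
  }
}
```

Because recoverLease only starts recovery on the NameNode side, a failed attempt=0 followed by a successful attempt=1 about four seconds later (4002ms in the log) is expected behaviour, not an error in itself.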
2024-11-14T19:54:34,127 WARN [DataStreamer for file /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 block BP-63474148-172.17.0.2-1731614050215:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-63474148-172.17.0.2-1731614050215:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39249,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK], DatanodeInfoWithStorage[127.0.0.1:41215,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39249,DS-5fb289e4-3b84-4138-9d66-d9d01a69a0d2,DISK]) is bad. 2024-11-14T19:54:34,130 WARN [DataStreamer for file /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 block BP-63474148-172.17.0.2-1731614050215:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-63474148-172.17.0.2-1731614050215:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:34,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741839_1022 (size=85) 2024-11-14T19:54:34,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741839_1022 (size=85) 2024-11-14T19:54:34,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:54:34,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:34,942 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T19:54:35,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:35,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:35,868 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614066469 after 4002ms 2024-11-14T19:54:36,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:36,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:37,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:37,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:37,910 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 after 4002ms 2024-11-14T19:54:37,910 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 2024-11-14T19:54:37,916 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 2024-11-14T19:54:37,917 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-14T19:54:37,917 ERROR [FSHLog-0-hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65-prefix:867b237d0fa7,34507,1731614051916.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:37,917 WARN [FSHLog-0-hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65-prefix:867b237d0fa7,34507,1731614051916.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T19:54:37,917 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C34507%2C1731614051916.meta:.meta(num 1731614052865) roll requested 2024-11-14T19:54:37,918 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C34507%2C1731614051916.meta.1731614077917.meta 2024-11-14T19:54:37,922 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:37,922 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:37,923 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:37,923 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:37,923 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:37,923 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.meta.1731614052865.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.meta.1731614077917.meta 2024-11-14T19:54:37,923 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:37,923 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
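The roll recorded here is driven internally by AbstractWALRoller after the append failure. For comparison, a WAL roll on a specific region server can also be requested through the public Admin API; the following is a minimal sketch under the assumption of a reachable cluster, with the server name taken from the log (this is not what the test itself does).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalFromClient {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Server name format is host,port,startcode, as printed throughout the log.
      ServerName rs = ServerName.valueOf("867b237d0fa7,34507,1731614051916");
      admin.rollWALWriter(rs);  // asks that region server to roll its WAL writers
    }
  }
}
```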
2024-11-14T19:54:37,923 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.meta.1731614052865.meta 2024-11-14T19:54:37,924 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46569:46569),(127.0.0.1/127.0.0.1:34947:34947)] 2024-11-14T19:54:37,924 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.meta.1731614052865.meta is not closed yet, will try archiving it next time 2024-11-14T19:54:37,924 WARN [IPC Server handler 1 on default port 32953 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.meta.1731614052865.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1015 2024-11-14T19:54:37,924 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.meta.1731614052865.meta after 1ms 2024-11-14T19:54:37,940 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/.tmp/info/943a37366429408fb870e6ce0877b0cd is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498./info:regioninfo/1731614053445/Put/seqid=0 2024-11-14T19:54:37,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741841_1025 (size=7125) 2024-11-14T19:54:37,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741841_1025 (size=7125) 2024-11-14T19:54:37,945 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/.tmp/info/943a37366429408fb870e6ce0877b0cd 2024-11-14T19:54:37,967 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/.tmp/ns/c0c424d084c7438b8a8e55570e744f09 is 43, key is default/ns:d/1731614052935/Put/seqid=0 2024-11-14T19:54:37,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741842_1026 (size=5153) 2024-11-14T19:54:37,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741842_1026 (size=5153) 2024-11-14T19:54:37,977 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/.tmp/ns/c0c424d084c7438b8a8e55570e744f09 2024-11-14T19:54:38,000 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/.tmp/table/21e17d24ce054d9ba1c775957d5a90b3 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731614053456/Put/seqid=0 2024-11-14T19:54:38,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741843_1027 (size=5438) 2024-11-14T19:54:38,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741843_1027 (size=5438) 2024-11-14T19:54:38,005 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/.tmp/table/21e17d24ce054d9ba1c775957d5a90b3 2024-11-14T19:54:38,013 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/.tmp/info/943a37366429408fb870e6ce0877b0cd as hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/info/943a37366429408fb870e6ce0877b0cd 2024-11-14T19:54:38,020 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/info/943a37366429408fb870e6ce0877b0cd, entries=10, sequenceid=11, filesize=7.0 K 2024-11-14T19:54:38,021 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/.tmp/ns/c0c424d084c7438b8a8e55570e744f09 as hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/ns/c0c424d084c7438b8a8e55570e744f09 2024-11-14T19:54:38,029 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/ns/c0c424d084c7438b8a8e55570e744f09, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T19:54:38,030 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/.tmp/table/21e17d24ce054d9ba1c775957d5a90b3 as hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/table/21e17d24ce054d9ba1c775957d5a90b3 2024-11-14T19:54:38,037 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/table/21e17d24ce054d9ba1c775957d5a90b3, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T19:54:38,038 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=11, compaction requested=false 2024-11-14T19:54:38,038 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T19:54:38,038 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing bd52de533b3b9366f0c10bb5defbe498 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-14T19:54:38,039 ERROR [FSHLog-0-hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65-prefix:867b237d0fa7,34507,1731614051916 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-63474148-172.17.0.2-1731614050215:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:38,039 WARN [FSHLog-0-hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65-prefix:867b237d0fa7,34507,1731614051916 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-63474148-172.17.0.2-1731614050215:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
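The repeated jdk.proxy2.$Proxy47 / HFileSystem$1.invoke frames in the traces above are the signature of JDK dynamic proxies layered around the HDFS ClientProtocol: each wrapper adds a Method.invoke plus handler.invoke pair to every call, so stacking several wrappers multiplies the same frame group. The class below is a minimal, self-contained sketch of such a pass-through proxy, using only the standard java.lang.reflect API; it is an illustration of the mechanism, not HBase's actual HFileSystem code.

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import java.lang.reflect.Proxy;

    public final class PassThroughProxy {
      // Wrap any interface implementation in a dynamic proxy that simply
      // forwards every call to the underlying target. Each such layer
      // contributes a "$ProxyNN.method -> Method.invoke -> handler.invoke"
      // group of frames like the ones repeated in the stack traces above.
      @SuppressWarnings("unchecked")
      public static <T> T wrap(Class<T> iface, T target) {
        InvocationHandler handler = (proxy, method, args) -> {
          try {
            return method.invoke(target, args);   // delegate to the real object
          } catch (InvocationTargetException e) {
            throw e.getCause();                   // rethrow the original exception
          }
        };
        return (T) Proxy.newProxyInstance(
            iface.getClassLoader(), new Class<?>[] { iface }, handler);
      }
    }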
2024-11-14T19:54:38,040 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C34507%2C1731614051916:(num 1731614073868) roll requested 2024-11-14T19:54:38,040 INFO [regionserver/867b237d0fa7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C34507%2C1731614051916.1731614078040 2024-11-14T19:54:38,045 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 newFile=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614078040 2024-11-14T19:54:38,045 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:38,045 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:38,046 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:38,046 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:38,046 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:38,046 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614078040 2024-11-14T19:54:38,046 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-63474148-172.17.0.2-1731614050215:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:38,046 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-63474148-172.17.0.2-1731614050215:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T19:54:38,047 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 2024-11-14T19:54:38,047 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 after 0ms 2024-11-14T19:54:38,051 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.1731614073868 to hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/oldWALs/867b237d0fa7%2C34507%2C1731614051916.1731614073868 2024-11-14T19:54:38,055 DEBUG [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34947:34947),(127.0.0.1/127.0.0.1:46569:46569)] 2024-11-14T19:54:38,071 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/default/TestLogRolling-testLogRollOnPipelineRestart/bd52de533b3b9366f0c10bb5defbe498/.tmp/info/e62c2e675d0c444fa9af51334b26ac90 is 1080, key is row1002/info:/1731614063090/Put/seqid=0 2024-11-14T19:54:38,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741845_1029 (size=9270) 2024-11-14T19:54:38,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741845_1029 (size=9270) 2024-11-14T19:54:38,079 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/default/TestLogRolling-testLogRollOnPipelineRestart/bd52de533b3b9366f0c10bb5defbe498/.tmp/info/e62c2e675d0c444fa9af51334b26ac90 2024-11-14T19:54:38,086 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/default/TestLogRolling-testLogRollOnPipelineRestart/bd52de533b3b9366f0c10bb5defbe498/.tmp/info/e62c2e675d0c444fa9af51334b26ac90 as hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/default/TestLogRolling-testLogRollOnPipelineRestart/bd52de533b3b9366f0c10bb5defbe498/info/e62c2e675d0c444fa9af51334b26ac90 2024-11-14T19:54:38,093 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/default/TestLogRolling-testLogRollOnPipelineRestart/bd52de533b3b9366f0c10bb5defbe498/info/e62c2e675d0c444fa9af51334b26ac90, entries=4, sequenceid=8, filesize=9.1 K 2024-11-14T19:54:38,095 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for bd52de533b3b9366f0c10bb5defbe498 in 57ms, sequenceid=8, compaction requested=false 2024-11-14T19:54:38,095 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for bd52de533b3b9366f0c10bb5defbe498: 2024-11-14T19:54:38,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T19:54:38,101 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T19:54:38,101 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:54:38,101 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:54:38,101 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:54:38,101 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T19:54:38,101 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T19:54:38,102 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1451618077, stopped=false 2024-11-14T19:54:38,102 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=867b237d0fa7,39903,1731614051787 2024-11-14T19:54:38,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T19:54:38,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T19:54:38,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:38,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:38,196 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T19:54:38,196 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
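The "Call stack:" dump above is AsyncConnectionImpl logging the current thread's stack at DEBUG when the connection is closed, so the test log records which code path triggered the close. The same pattern can be reproduced with nothing but Thread.getStackTrace; the class below is a hypothetical stand-alone sketch of that logging idiom, not the HBase implementation.

    public final class CallStackLogger {
      // Render the current thread's stack in the same "Call stack: at ..."
      // form seen in the DEBUG entries above.
      public static String currentCallStack() {
        StringBuilder sb = new StringBuilder("Call stack:");
        for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
          sb.append(System.lineSeparator()).append("  at ").append(frame);
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        System.out.println(currentCallStack());
      }
    }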
2024-11-14T19:54:38,196 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:54:38,196 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:54:38,197 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '867b237d0fa7,34507,1731614051916' ***** 2024-11-14T19:54:38,197 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T19:54:38,197 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:54:38,197 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:54:38,197 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T19:54:38,197 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T19:54:38,197 INFO [RS:0;867b237d0fa7:34507 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T19:54:38,198 INFO [RS:0;867b237d0fa7:34507 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T19:54:38,198 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(3091): Received CLOSE for bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:38,198 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(959): stopping server 867b237d0fa7,34507,1731614051916 2024-11-14T19:54:38,198 INFO [RS:0;867b237d0fa7:34507 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T19:54:38,198 INFO [RS:0;867b237d0fa7:34507 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;867b237d0fa7:34507. 2024-11-14T19:54:38,198 DEBUG [RS:0;867b237d0fa7:34507 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:54:38,198 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bd52de533b3b9366f0c10bb5defbe498, disabling compactions & flushes 2024-11-14T19:54:38,198 DEBUG [RS:0;867b237d0fa7:34507 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:54:38,198 INFO 
[RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 2024-11-14T19:54:38,198 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 2024-11-14T19:54:38,198 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T19:54:38,198 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T19:54:38,198 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. after waiting 0 ms 2024-11-14T19:54:38,198 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T19:54:38,198 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 2024-11-14T19:54:38,199 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T19:54:38,199 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T19:54:38,199 DEBUG [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, bd52de533b3b9366f0c10bb5defbe498=TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498.} 2024-11-14T19:54:38,199 DEBUG [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, bd52de533b3b9366f0c10bb5defbe498 2024-11-14T19:54:38,199 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:54:38,199 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:54:38,199 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:54:38,199 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T19:54:38,199 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T19:54:38,204 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/default/TestLogRolling-testLogRollOnPipelineRestart/bd52de533b3b9366f0c10bb5defbe498/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-14T19:54:38,204 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T19:54:38,205 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:54:38,205 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 2024-11-14T19:54:38,205 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:54:38,205 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bd52de533b3b9366f0c10bb5defbe498: Waiting for close lock at 1731614078198Running coprocessor pre-close hooks at 1731614078198Disabling compacts and flushes for region at 1731614078198Disabling writes for close at 1731614078198Writing region close event to WAL at 1731614078199 (+1 ms)Running coprocessor post-close hooks at 1731614078205 (+6 ms)Closed at 1731614078205 2024-11-14T19:54:38,205 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731614078199Running coprocessor pre-close hooks at 1731614078199Disabling compacts and flushes for region at 1731614078199Disabling writes for close at 1731614078199Writing region close event to WAL at 1731614078201 (+2 ms)Running coprocessor post-close hooks at 1731614078205 (+4 ms)Closed at 1731614078205 2024-11-14T19:54:38,206 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T19:54:38,206 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731614053067.bd52de533b3b9366f0c10bb5defbe498. 2024-11-14T19:54:38,304 INFO [regionserver/867b237d0fa7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:54:38,327 INFO [regionserver/867b237d0fa7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T19:54:38,328 INFO [regionserver/867b237d0fa7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T19:54:38,399 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(976): stopping server 867b237d0fa7,34507,1731614051916; all regions closed. 
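The Close-WAL-Writer entries that follow repeatedly log "Failed invocation ... Filesystem closed" while RecoverLeaseFSUtils polls the NameNode, and eventually report "Recovered lease, attempt=1 ... after 4002ms". The sketch below is a simplified version of that recover-then-poll loop, restricted to the public DistributedFileSystem.recoverLease and isFileClosed calls that appear in the traces; the method name, poll interval, and attempt bound are invented for illustration and are not HBase's RecoverLeaseFSUtils code.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      // Ask the NameNode to release the previous writer's lease on a WAL
      // file, then poll until the file is reported closed.
      public static boolean waitForLease(DistributedFileSystem dfs, Path wal,
          long pollMillis, int maxAttempts) throws Exception {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          if (dfs.recoverLease(wal)) {     // true: lease released, file closed
            return true;
          }
          Thread.sleep(pollMillis);        // back off before asking again
          if (dfs.isFileClosed(wal)) {     // throws IOException("Filesystem closed")
            return true;                   // once the DFSClient has been shut down,
          }                                // as in the WARN entries that follow
        }
        return false;
      }
    }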
2024-11-14T19:54:38,400 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:38,400 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:38,400 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:38,400 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:38,400 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:38,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741840_1023 (size=825) 2024-11-14T19:54:38,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741840_1023 (size=825) 2024-11-14T19:54:38,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:38,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:39,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:39,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:40,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:40,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:40,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:54:40,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T19:54:40,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T19:54:41,127 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T19:54:41,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:41,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-14T19:54:41,770 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-14T19:54:41,925 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.meta.1731614052865.meta after 4002ms
2024-11-14T19:54:41,925 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/WALs/867b237d0fa7,34507,1731614051916/867b237d0fa7%2C34507%2C1731614051916.meta.1731614052865.meta to hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/oldWALs/867b237d0fa7%2C34507%2C1731614051916.meta.1731614052865.meta
2024-11-14T19:54:41,929 DEBUG [RS:0;867b237d0fa7:34507 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/oldWALs
2024-11-14T19:54:41,930 INFO [RS:0;867b237d0fa7:34507 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C34507%2C1731614051916.meta:.meta(num 1731614077917)
2024-11-14T19:54:41,930 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:41,930 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:41,930 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:41,930 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:41,931 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:41,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741844_1028 (size=1162)
2024-11-14T19:54:41,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741844_1028 (size=1162)
2024-11-14T19:54:41,940 DEBUG [RS:0;867b237d0fa7:34507 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/oldWALs
2024-11-14T19:54:41,940 INFO [RS:0;867b237d0fa7:34507 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C34507%2C1731614051916:(num 1731614078040)
2024-11-14T19:54:41,940 DEBUG [RS:0;867b237d0fa7:34507 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T19:54:41,940 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.LeaseManager(133): Closed leases
2024-11-14T19:54:41,940 INFO [RS:0;867b237d0fa7:34507 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-14T19:54:41,940 INFO [RS:0;867b237d0fa7:34507 {}] hbase.ChoreService(370): Chore service for: regionserver/867b237d0fa7:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-14T19:54:41,941 INFO [RS:0;867b237d0fa7:34507 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-14T19:54:41,941 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-14T19:54:41,941 INFO [RS:0;867b237d0fa7:34507 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34507
2024-11-14T19:54:42,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-14T19:54:42,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/867b237d0fa7,34507,1731614051916
2024-11-14T19:54:42,007 INFO [RS:0;867b237d0fa7:34507 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-14T19:54:42,008 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [867b237d0fa7,34507,1731614051916]
2024-11-14T19:54:42,104 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/867b237d0fa7,34507,1731614051916 already deleted, retry=false
2024-11-14T19:54:42,104 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 867b237d0fa7,34507,1731614051916 expired; onlineServers=0
2024-11-14T19:54:42,104 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '867b237d0fa7,39903,1731614051787' *****
2024-11-14T19:54:42,104 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-14T19:54:42,104 INFO [M:0;867b237d0fa7:39903 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-14T19:54:42,104 INFO [M:0;867b237d0fa7:39903 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-14T19:54:42,104 DEBUG [M:0;867b237d0fa7:39903 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-14T19:54:42,104 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
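The shutdown sequence above is driven by ZooKeeper watch notifications: the region server's ephemeral znode under /hbase/rs disappears, the master's tracker receives a NodeDeleted event, and expiration processing follows. The sketch below is only a minimal, hypothetical illustration of that watch-and-react pattern on the plain ZooKeeper client API; it is not HBase's RegionServerTracker code, and the class name, session timeout, and sleep are placeholders (the quorum and znode values are copied from the log purely for flavor).

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Minimal sketch: watch an ephemeral znode and react when it is deleted.
public class EphemeralNodeWatch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:55467";                                 // from the log, illustrative only
    String znode = "/hbase/rs/867b237d0fa7,34507,1731614051916";       // from the log, illustrative only

    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { });

    Watcher deletionWatcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
          // Roughly the point at which a master-side tracker would start expiration handling.
          System.out.println("Ephemeral node deleted: " + event.getPath());
        }
      }
    };

    // exists() registers a one-shot watch; a real tracker re-registers it after each event.
    zk.exists(znode, deletionWatcher);

    // Keep the session alive briefly so the watch can fire, then clean up.
    Thread.sleep(60_000);
    zk.close();
  }
}
```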
2024-11-14T19:54:42,104 DEBUG [M:0;867b237d0fa7:39903 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-14T19:54:42,104 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614052229 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614052229,5,FailOnTimeoutGroup]
2024-11-14T19:54:42,104 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614052228 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614052228,5,FailOnTimeoutGroup]
2024-11-14T19:54:42,105 INFO [M:0;867b237d0fa7:39903 {}] hbase.ChoreService(370): Chore service for: master/867b237d0fa7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-14T19:54:42,105 INFO [M:0;867b237d0fa7:39903 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-14T19:54:42,105 DEBUG [M:0;867b237d0fa7:39903 {}] master.HMaster(1795): Stopping service threads
2024-11-14T19:54:42,105 INFO [M:0;867b237d0fa7:39903 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-14T19:54:42,105 INFO [M:0;867b237d0fa7:39903 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-14T19:54:42,105 INFO [M:0;867b237d0fa7:39903 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-14T19:54:42,105 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-14T19:54:42,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-14T19:54:42,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-14T19:54:42,113 DEBUG [M:0;867b237d0fa7:39903 {}] zookeeper.ZKUtil(347): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-14T19:54:42,113 WARN [M:0;867b237d0fa7:39903 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-14T19:54:42,113 INFO [M:0;867b237d0fa7:39903 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/.lastflushedseqids
2024-11-14T19:54:42,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741846_1030 (size=111)
2024-11-14T19:54:42,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741846_1030 (size=111)
2024-11-14T19:54:42,121 INFO [M:0;867b237d0fa7:39903 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-14T19:54:42,121 INFO [M:0;867b237d0fa7:39903 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-14T19:54:42,122 DEBUG [M:0;867b237d0fa7:39903 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-14T19:54:42,122 INFO [M:0;867b237d0fa7:39903 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-14T19:54:42,122 DEBUG [M:0;867b237d0fa7:39903 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-14T19:54:42,122 DEBUG [M:0;867b237d0fa7:39903 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-14T19:54:42,122 DEBUG [M:0;867b237d0fa7:39903 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-14T19:54:42,122 INFO [M:0;867b237d0fa7:39903 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB
2024-11-14T19:54:42,122 ERROR [FSHLog-0-hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData-prefix:867b237d0fa7,39903,1731614051787 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:42,122 WARN [FSHLog-0-hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData-prefix:867b237d0fa7,39903,1731614051787 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:42,123 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 867b237d0fa7%2C39903%2C1731614051787:(num 1731614052046) roll requested
2024-11-14T19:54:42,123 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C39903%2C1731614051787.1731614082123
2024-11-14T19:54:42,129 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:42,129 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:42,129 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:42,129 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:42,130 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:54:42,130 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/WALs/867b237d0fa7,39903,1731614051787/867b237d0fa7%2C39903%2C1731614051787.1731614052046 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/WALs/867b237d0fa7,39903,1731614051787/867b237d0fa7%2C39903%2C1731614051787.1731614082123
2024-11-14T19:54:42,130 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:42,130 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34575,DS-b46607e9-a713-4575-9edb-111f698c75be,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T19:54:42,131 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/WALs/867b237d0fa7,39903,1731614051787/867b237d0fa7%2C39903%2C1731614051787.1731614052046
2024-11-14T19:54:42,131 WARN [IPC Server handler 4 on default port 32953 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/WALs/867b237d0fa7,39903,1731614051787/867b237d0fa7%2C39903%2C1731614051787.1731614052046 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1014
2024-11-14T19:54:42,131 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/WALs/867b237d0fa7,39903,1731614051787/867b237d0fa7%2C39903%2C1731614051787.1731614052046 after 0ms
2024-11-14T19:54:42,132 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34947:34947),(127.0.0.1/127.0.0.1:46569:46569)]
2024-11-14T19:54:42,132 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/WALs/867b237d0fa7,39903,1731614051787/867b237d0fa7%2C39903%2C1731614051787.1731614052046 is not closed yet, will try archiving it next time
2024-11-14T19:54:42,147 DEBUG [M:0;867b237d0fa7:39903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/46705faf80504afbad70614a4ab147cd is 82, key is hbase:meta,,1/info:regioninfo/1731614052900/Put/seqid=0
2024-11-14T19:54:42,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741848_1033 (size=5672)
2024-11-14T19:54:42,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741848_1033 (size=5672)
2024-11-14T19:54:42,153 INFO [M:0;867b237d0fa7:39903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/46705faf80504afbad70614a4ab147cd
2024-11-14T19:54:42,178 DEBUG [M:0;867b237d0fa7:39903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1d9e939bb8e74a0891a35c03c5bc4eb7 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731614053461/Put/seqid=0
2024-11-14T19:54:42,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741849_1034 (size=6118)
2024-11-14T19:54:42,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741849_1034 (size=6118)
2024-11-14T19:54:42,183 INFO [M:0;867b237d0fa7:39903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1d9e939bb8e74a0891a35c03c5bc4eb7
2024-11-14T19:54:42,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-14T19:54:42,193 INFO [RS:0;867b237d0fa7:34507 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-14T19:54:42,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34507-0x1013c1753650001, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-14T19:54:42,193 INFO [RS:0;867b237d0fa7:34507 {}] regionserver.HRegionServer(1031): Exiting; stopping=867b237d0fa7,34507,1731614051916; zookeeper connection closed.
2024-11-14T19:54:42,193 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@458b4d6b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@458b4d6b
2024-11-14T19:54:42,193 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-14T19:54:42,204 DEBUG [M:0;867b237d0fa7:39903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a8e4bd46b59d47bdad24a0eae7361941 is 69, key is 867b237d0fa7,34507,1731614051916/rs:state/1731614052281/Put/seqid=0
2024-11-14T19:54:42,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741850_1035 (size=5156)
2024-11-14T19:54:42,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741850_1035 (size=5156)
2024-11-14T19:54:42,209 INFO [M:0;867b237d0fa7:39903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a8e4bd46b59d47bdad24a0eae7361941
2024-11-14T19:54:42,229 DEBUG [M:0;867b237d0fa7:39903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ae0ebf625605466f994c75b984b6ad59 is 52, key is load_balancer_on/state:d/1731614053060/Put/seqid=0
2024-11-14T19:54:42,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741851_1036 (size=5056)
2024-11-14T19:54:42,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741851_1036 (size=5056)
2024-11-14T19:54:42,234 INFO [M:0;867b237d0fa7:39903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ae0ebf625605466f994c75b984b6ad59
2024-11-14T19:54:42,240 DEBUG [M:0;867b237d0fa7:39903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/46705faf80504afbad70614a4ab147cd as hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/46705faf80504afbad70614a4ab147cd
2024-11-14T19:54:42,245 INFO [M:0;867b237d0fa7:39903 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/46705faf80504afbad70614a4ab147cd, entries=8, sequenceid=56, filesize=5.5 K
2024-11-14T19:54:42,247 DEBUG [M:0;867b237d0fa7:39903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1d9e939bb8e74a0891a35c03c5bc4eb7 as hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1d9e939bb8e74a0891a35c03c5bc4eb7
2024-11-14T19:54:42,253 INFO [M:0;867b237d0fa7:39903 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1d9e939bb8e74a0891a35c03c5bc4eb7, entries=6, sequenceid=56, filesize=6.0 K
2024-11-14T19:54:42,254 DEBUG [M:0;867b237d0fa7:39903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a8e4bd46b59d47bdad24a0eae7361941 as hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a8e4bd46b59d47bdad24a0eae7361941
2024-11-14T19:54:42,262 INFO [M:0;867b237d0fa7:39903 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a8e4bd46b59d47bdad24a0eae7361941, entries=1, sequenceid=56, filesize=5.0 K
2024-11-14T19:54:42,263 DEBUG [M:0;867b237d0fa7:39903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ae0ebf625605466f994c75b984b6ad59 as hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ae0ebf625605466f994c75b984b6ad59
2024-11-14T19:54:42,269 INFO [M:0;867b237d0fa7:39903 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ae0ebf625605466f994c75b984b6ad59, entries=1, sequenceid=56, filesize=4.9 K
2024-11-14T19:54:42,271 INFO [M:0;867b237d0fa7:39903 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=56, compaction requested=false
2024-11-14T19:54:42,273 INFO [M:0;867b237d0fa7:39903 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
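The flush entries above follow a write-to-temp-then-commit pattern: each column family's memstore is flushed to a file under .tmp, and the file is then "committed" by moving it into the store directory so readers only ever see complete files. As a rough illustration of that pattern on the Hadoop FileSystem API only (not HBase's HFile writer or HRegionFileSystem commit code; the class name, paths, and payload below are made up), a sketch:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the two-step flush visible in the log: write under .tmp, then
// publish with a rename into the final store directory.
public class TmpThenCommit {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);            // local FS by default; HDFS if configured

    Path tmpFile = new Path("target/demo-store/.tmp/flush-0001");   // hypothetical layout
    Path committed = new Path("target/demo-store/info/flush-0001"); // hypothetical layout

    // Step 1: write the flushed data somewhere readers do not look yet.
    try (FSDataOutputStream out = fs.create(tmpFile)) {
      out.writeBytes("flushed cells would go here");
    }

    // Step 2: commit by renaming into the visible store directory.
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmpFile, committed)) {
      throw new java.io.IOException("commit failed for " + tmpFile);
    }
  }
}
```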
2024-11-14T19:54:42,273 DEBUG [M:0;867b237d0fa7:39903 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731614082121Disabling compacts and flushes for region at 1731614082121Disabling writes for close at 1731614082122 (+1 ms)Obtaining lock to block concurrent updates at 1731614082122Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731614082122Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731614082123 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731614082132 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731614082132Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731614082147 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731614082147Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731614082158 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731614082177 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731614082177Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731614082188 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731614082204 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731614082204Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731614082214 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731614082228 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731614082228Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b83b1a0: reopening flushed file at 1731614082239 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58dbaefd: reopening flushed file at 1731614082246 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b038fe9: reopening flushed file at 1731614082253 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d3c2636: reopening flushed file at 1731614082262 (+9 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=56, compaction requested=false at 1731614082271 (+9 ms)Writing region close event to WAL at 1731614082273 (+2 ms)Closed at 1731614082273 2024-11-14T19:54:42,274 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:42,274 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:42,274 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:42,274 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:42,274 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:54:42,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41215 is added to blk_1073741847_1031 (size=757) 2024-11-14T19:54:42,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39249 is added to blk_1073741847_1031 (size=757) 2024-11-14T19:54:42,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:42,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:43,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,232 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:43,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:43,740 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T19:54:43,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,769 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,769 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:43,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:44,128 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1014: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T19:54:44,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:44,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:45,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:45,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:46,132 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/WALs/867b237d0fa7,39903,1731614051787/867b237d0fa7%2C39903%2C1731614051787.1731614052046 after 4001ms 2024-11-14T19:54:46,133 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/WALs/867b237d0fa7,39903,1731614051787/867b237d0fa7%2C39903%2C1731614051787.1731614052046 to hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/oldWALs/867b237d0fa7%2C39903%2C1731614051787.1731614052046 2024-11-14T19:54:46,137 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/MasterData/oldWALs/867b237d0fa7%2C39903%2C1731614051787.1731614052046 to hdfs://localhost:32953/user/jenkins/test-data/e6833cb1-3c22-5799-97f0-d51f7c3a8b65/oldWALs/867b237d0fa7%2C39903%2C1731614051787.1731614052046$masterlocalwal$ 2024-11-14T19:54:46,137 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T19:54:46,137 INFO [M:0;867b237d0fa7:39903 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
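The RecoverLeaseFSUtils entries in this stretch of the log show the usual HDFS lease-recovery dance on an abandoned WAL: ask the NameNode to recover the lease, then poll whether the file is reported closed, retrying with a delay (here the lease came back on attempt=1 after roughly 4 seconds; the repeated "Failed invocation ... Filesystem closed" warnings come from polling through an already-closed client). The following is only a minimal sketch of that loop against the public DistributedFileSystem API, not the hbase-asyncfs implementation; the class name, retry counts, and sleep values are illustrative assumptions.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of lease recovery plus "is it closed yet?" polling, as suggested by
// the RecoverLeaseFSUtils log lines. Retry counts and sleeps are arbitrary.
public final class LeaseRecoverySketch {
  public static boolean recover(DistributedFileSystem dfs, Path wal) throws Exception {
    for (int attempt = 0; attempt < 10; attempt++) {
      // Ask the NameNode to start (or confirm) lease recovery for this file.
      if (dfs.recoverLease(wal)) {
        return true;                 // lease is ours / file already closed
      }
      // Not yet: poll whether the file has been closed in the meantime.
      for (int i = 0; i < 4; i++) {
        if (dfs.isFileClosed(wal)) {
          return true;
        }
        Thread.sleep(1000);          // roughly the ~1s cadence seen in the log
      }
    }
    return false;                    // caller decides how to proceed
  }
}
```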
2024-11-14T19:54:46,137 INFO [M:0;867b237d0fa7:39903 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39903 2024-11-14T19:54:46,137 INFO [M:0;867b237d0fa7:39903 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:54:46,249 INFO [M:0;867b237d0fa7:39903 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:54:46,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:54:46,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39903-0x1013c1753650000, quorum=127.0.0.1:55467, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:54:46,252 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@165c873c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:46,253 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b6bf876{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:54:46,253 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:54:46,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f4f6bd2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:54:46,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f5ff1a9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,STOPPED} 2024-11-14T19:54:46,255 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T19:54:46,255 WARN [BP-63474148-172.17.0.2-1731614050215 heartbeating to localhost/127.0.0.1:32953 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:54:46,255 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:54:46,255 WARN [BP-63474148-172.17.0.2-1731614050215 heartbeating to localhost/127.0.0.1:32953 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-63474148-172.17.0.2-1731614050215 (Datanode Uuid 218bf901-ab76-4697-a0a6-da051c6f042b) service to localhost/127.0.0.1:32953 2024-11-14T19:54:46,256 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data3/current/BP-63474148-172.17.0.2-1731614050215 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:46,256 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data4/current/BP-63474148-172.17.0.2-1731614050215 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:46,256 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:54:46,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e88a461{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:46,259 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b7001f3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:54:46,259 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:54:46,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2122c1dc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:54:46,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2efb5f06{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,STOPPED} 2024-11-14T19:54:46,261 WARN [BP-63474148-172.17.0.2-1731614050215 heartbeating to localhost/127.0.0.1:32953 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:54:46,261 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T19:54:46,261 WARN [BP-63474148-172.17.0.2-1731614050215 heartbeating to localhost/127.0.0.1:32953 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-63474148-172.17.0.2-1731614050215 (Datanode Uuid 9fab9f93-3f7c-4a9f-9efb-34f9d8de144f) service to localhost/127.0.0.1:32953 2024-11-14T19:54:46,261 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:54:46,262 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data1/current/BP-63474148-172.17.0.2-1731614050215 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:46,262 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/cluster_27c04c67-dbda-ec7c-5ea2-ed32993188ab/data/data2/current/BP-63474148-172.17.0.2-1731614050215 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:54:46,262 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:54:46,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@412993b5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:54:46,268 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4abaeae4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:54:46,268 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:54:46,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@349c14ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:54:46,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13514823{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir/,STOPPED} 2024-11-14T19:54:46,274 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T19:54:46,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T19:54:46,301 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32953 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:32953 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32953 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32953 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:32953 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:32953 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:32953 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:32953 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=255 (was 240) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6031 (was 5649) - AvailableMemoryMB LEAK? 
- 2024-11-14T19:54:46,308 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=255, ProcessCount=11, AvailableMemoryMB=6031 2024-11-14T19:54:46,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T19:54:46,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.log.dir so I do NOT create it in target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9 2024-11-14T19:54:46,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/944f412f-1b17-4719-5782-fcddaf2e2df1/hadoop.tmp.dir so I do NOT create it in target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9 2024-11-14T19:54:46,309 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/cluster_cf91a5d0-4b02-8cc4-12f9-60484908a28e, deleteOnExit=true 2024-11-14T19:54:46,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T19:54:46,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/test.cache.data in system properties and HBase conf 2024-11-14T19:54:46,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T19:54:46,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/hadoop.log.dir in system properties and HBase conf 2024-11-14T19:54:46,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T19:54:46,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T19:54:46,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T19:54:46,310 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T19:54:46,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T19:54:46,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T19:54:46,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T19:54:46,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T19:54:46,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T19:54:46,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T19:54:46,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T19:54:46,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:54:46,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T19:54:46,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/nfs.dump.dir in system properties and HBase conf 2024-11-14T19:54:46,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/java.io.tmpdir in system properties and HBase conf 2024-11-14T19:54:46,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:54:46,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T19:54:46,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T19:54:46,326 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:54:46,580 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:54:46,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:46,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:46,585 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:54:46,588 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:54:46,588 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:54:46,588 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:54:46,590 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:54:46,590 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53fdada6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:54:46,591 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3def846f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:54:46,694 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5da50ccf{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/java.io.tmpdir/jetty-localhost-46097-hadoop-hdfs-3_4_1-tests_jar-_-any-2388180443376298070/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:54:46,695 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13aafd08{HTTP/1.1, (http/1.1)}{localhost:46097} 2024-11-14T19:54:46,695 INFO [Time-limited test {}] server.Server(415): Started @198741ms 2024-11-14T19:54:46,707 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:54:46,857 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:54:46,861 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:54:46,862 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:54:46,862 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:54:46,862 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:54:46,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7cce55bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:54:46,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65e861e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:54:46,971 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75949488{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/java.io.tmpdir/jetty-localhost-41799-hadoop-hdfs-3_4_1-tests_jar-_-any-4499081742538512846/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:46,972 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@53a45e9d{HTTP/1.1, (http/1.1)}{localhost:41799} 2024-11-14T19:54:46,972 INFO [Time-limited test {}] server.Server(415): Started @199018ms 2024-11-14T19:54:46,973 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:54:47,008 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:54:47,012 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:54:47,014 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:54:47,014 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:54:47,014 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:54:47,014 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a5795ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:54:47,015 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62e7c7a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:54:47,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@36ee399a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/java.io.tmpdir/jetty-localhost-34843-hadoop-hdfs-3_4_1-tests_jar-_-any-11681839694711468251/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:54:47,119 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5393e504{HTTP/1.1, (http/1.1)}{localhost:34843} 2024-11-14T19:54:47,119 INFO [Time-limited test {}] server.Server(415): Started @199165ms 2024-11-14T19:54:47,121 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:54:47,570 WARN [Thread-1650 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/cluster_cf91a5d0-4b02-8cc4-12f9-60484908a28e/data/data1/current/BP-46802264-172.17.0.2-1731614086336/current, will proceed with Du for space computation calculation, 2024-11-14T19:54:47,570 WARN [Thread-1651 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/cluster_cf91a5d0-4b02-8cc4-12f9-60484908a28e/data/data2/current/BP-46802264-172.17.0.2-1731614086336/current, will proceed with Du for space computation calculation, 2024-11-14T19:54:47,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:47,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:47,589 WARN [Thread-1615 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T19:54:47,591 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xced4de122f6f96a0 with lease ID 0xc63eaa9fd4c0eeb7: Processing first storage report for DS-a2a1fec1-57db-4f40-b328-26822254bda4 from datanode DatanodeRegistration(127.0.0.1:40795, datanodeUuid=1c7fda34-774a-478a-a2fe-d225c9bd26de, infoPort=33739, infoSecurePort=0, ipcPort=46505, storageInfo=lv=-57;cid=testClusterID;nsid=379665706;c=1731614086336) 2024-11-14T19:54:47,591 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xced4de122f6f96a0 with lease ID 0xc63eaa9fd4c0eeb7: from storage DS-a2a1fec1-57db-4f40-b328-26822254bda4 node DatanodeRegistration(127.0.0.1:40795, datanodeUuid=1c7fda34-774a-478a-a2fe-d225c9bd26de, infoPort=33739, infoSecurePort=0, ipcPort=46505, storageInfo=lv=-57;cid=testClusterID;nsid=379665706;c=1731614086336), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:47,591 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xced4de122f6f96a0 with lease ID 0xc63eaa9fd4c0eeb7: Processing first storage report for DS-c7f94fc2-999d-4615-a181-963a6ab5d2f0 from datanode DatanodeRegistration(127.0.0.1:40795, datanodeUuid=1c7fda34-774a-478a-a2fe-d225c9bd26de, infoPort=33739, infoSecurePort=0, ipcPort=46505, storageInfo=lv=-57;cid=testClusterID;nsid=379665706;c=1731614086336) 2024-11-14T19:54:47,591 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xced4de122f6f96a0 with lease ID 0xc63eaa9fd4c0eeb7: from storage DS-c7f94fc2-999d-4615-a181-963a6ab5d2f0 node DatanodeRegistration(127.0.0.1:40795, datanodeUuid=1c7fda34-774a-478a-a2fe-d225c9bd26de, infoPort=33739, infoSecurePort=0, ipcPort=46505, storageInfo=lv=-57;cid=testClusterID;nsid=379665706;c=1731614086336), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:47,733 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/cluster_cf91a5d0-4b02-8cc4-12f9-60484908a28e/data/data3/current/BP-46802264-172.17.0.2-1731614086336/current, will proceed with Du for space computation calculation, 2024-11-14T19:54:47,734 WARN [Thread-1663 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/cluster_cf91a5d0-4b02-8cc4-12f9-60484908a28e/data/data4/current/BP-46802264-172.17.0.2-1731614086336/current, will proceed with Du for space computation calculation, 2024-11-14T19:54:47,754 WARN [Thread-1638 {}] datanode.DirectoryScanner(302): 
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T19:54:47,755 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5688f06d81406153 with lease ID 0xc63eaa9fd4c0eeb8: Processing first storage report for DS-9383e036-975c-4b1c-9845-b2f0465a88a0 from datanode DatanodeRegistration(127.0.0.1:45499, datanodeUuid=daa7497d-18ff-42e6-b9fc-eea280bbe94b, infoPort=45085, infoSecurePort=0, ipcPort=36115, storageInfo=lv=-57;cid=testClusterID;nsid=379665706;c=1731614086336) 2024-11-14T19:54:47,756 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5688f06d81406153 with lease ID 0xc63eaa9fd4c0eeb8: from storage DS-9383e036-975c-4b1c-9845-b2f0465a88a0 node DatanodeRegistration(127.0.0.1:45499, datanodeUuid=daa7497d-18ff-42e6-b9fc-eea280bbe94b, infoPort=45085, infoSecurePort=0, ipcPort=36115, storageInfo=lv=-57;cid=testClusterID;nsid=379665706;c=1731614086336), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:47,756 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5688f06d81406153 with lease ID 0xc63eaa9fd4c0eeb8: Processing first storage report for DS-a4f8cdbd-2aa5-4047-871c-da24ce046e3a from datanode DatanodeRegistration(127.0.0.1:45499, datanodeUuid=daa7497d-18ff-42e6-b9fc-eea280bbe94b, infoPort=45085, infoSecurePort=0, ipcPort=36115, storageInfo=lv=-57;cid=testClusterID;nsid=379665706;c=1731614086336) 2024-11-14T19:54:47,756 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5688f06d81406153 with lease ID 0xc63eaa9fd4c0eeb8: from storage DS-a4f8cdbd-2aa5-4047-871c-da24ce046e3a node DatanodeRegistration(127.0.0.1:45499, datanodeUuid=daa7497d-18ff-42e6-b9fc-eea280bbe94b, infoPort=45085, infoSecurePort=0, ipcPort=36115, storageInfo=lv=-57;cid=testClusterID;nsid=379665706;c=1731614086336), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:54:47,853 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9 2024-11-14T19:54:47,856 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/cluster_cf91a5d0-4b02-8cc4-12f9-60484908a28e/zookeeper_0, clientPort=49808, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/cluster_cf91a5d0-4b02-8cc4-12f9-60484908a28e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/cluster_cf91a5d0-4b02-8cc4-12f9-60484908a28e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T19:54:47,857 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49808 2024-11-14T19:54:47,857 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:47,859 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:47,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:54:47,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:54:47,869 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067 with version=8 2024-11-14T19:54:47,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/hbase-staging 2024-11-14T19:54:47,872 INFO [Time-limited test {}] client.ConnectionUtils(128): master/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:54:47,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:54:47,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:54:47,872 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:54:47,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:54:47,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:54:47,872 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T19:54:47,872 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:54:47,873 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39521 2024-11-14T19:54:47,874 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39521 connecting to ZooKeeper ensemble=127.0.0.1:49808 2024-11-14T19:54:47,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:395210x0, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:54:47,907 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39521-0x1013c17e0590000 connected 2024-11-14T19:54:47,967 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
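Editor's note: the "Failed invocation" warnings above come from RecoverLeaseFSUtils calling DistributedFileSystem#isFileClosed through reflection, which is why the underlying "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException. The following is only a minimal stand-alone sketch of that standard java.lang.reflect behaviour, not HBase's actual RecoverLeaseFSUtils code; the class name and path used here are invented for illustration.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveInvokeSketch {

    // Stand-in for DistributedFileSystem#isFileClosed on a client that is already closed.
    public boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
    }

    public static void main(String[] args) throws Exception {
        ReflectiveInvokeSketch fs = new ReflectiveInvokeSketch();
        Method m = ReflectiveInvokeSketch.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(fs, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // Method.invoke wraps whatever the target threw; the original IOException is
            // preserved as the cause, matching the "Caused by: java.io.IOException:
            // Filesystem closed" frames in the traces above.
            System.out.println("wrapped cause: " + e.getCause());
        }
    }
}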
2024-11-14T19:54:47,969 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:47,972 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:54:47,972 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067, hbase.cluster.distributed=false 2024-11-14T19:54:47,973 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:54:47,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39521 2024-11-14T19:54:47,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39521 2024-11-14T19:54:47,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39521 2024-11-14T19:54:47,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39521 2024-11-14T19:54:47,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39521 2024-11-14T19:54:47,990 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:54:47,991 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:54:47,991 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:54:47,991 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:54:47,991 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:54:47,991 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:54:47,991 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T19:54:47,991 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:54:47,992 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39535 2024-11-14T19:54:47,993 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39535 connecting to 
ZooKeeper ensemble=127.0.0.1:49808 2024-11-14T19:54:47,994 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:47,997 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:48,007 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:395350x0, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:54:48,008 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:395350x0, quorum=127.0.0.1:49808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:54:48,008 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T19:54:48,009 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39535-0x1013c17e0590001 connected 2024-11-14T19:54:48,009 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T19:54:48,009 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T19:54:48,010 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:54:48,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39535 2024-11-14T19:54:48,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39535 2024-11-14T19:54:48,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39535 2024-11-14T19:54:48,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39535 2024-11-14T19:54:48,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39535 2024-11-14T19:54:48,025 DEBUG [M:0;867b237d0fa7:39521 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;867b237d0fa7:39521 2024-11-14T19:54:48,025 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/867b237d0fa7,39521,1731614087871 2024-11-14T19:54:48,032 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:54:48,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:54:48,033 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] 
zookeeper.ZKUtil(111): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/867b237d0fa7,39521,1731614087871 2024-11-14T19:54:48,040 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T19:54:48,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:48,040 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:48,041 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T19:54:48,041 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/867b237d0fa7,39521,1731614087871 from backup master directory 2024-11-14T19:54:48,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/867b237d0fa7,39521,1731614087871 2024-11-14T19:54:48,049 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:54:48,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:54:48,049 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
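Editor's note: several DEBUG lines above report "Set watcher on znode that does not yet exist" followed later by NodeCreated events for the same paths. That matches plain ZooKeeper exists-with-watch semantics: an exists() call returns null for a missing node but still registers the watch. Below is a minimal sketch using the stock ZooKeeper client rather than HBase's ZKUtil/ZKWatcher wrappers; the connect string is a placeholder (the test cluster above listened on 127.0.0.1:49808).

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class WatchMissingZnode {
    public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event: " + event.getType() + " on " + event.getPath());

        // Placeholder quorum address for illustration only.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);

        // Returns null because /hbase/running does not exist yet, but the watch is still
        // registered, so a later NodeCreated event for that path is delivered to the watcher.
        Stat stat = zk.exists("/hbase/running", watcher);
        System.out.println("exists -> " + stat);

        zk.close();
    }
}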
2024-11-14T19:54:48,049 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=867b237d0fa7,39521,1731614087871 2024-11-14T19:54:48,058 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/hbase.id] with ID: e671132d-80d8-41ab-944a-acf42a71e4c9 2024-11-14T19:54:48,058 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/.tmp/hbase.id 2024-11-14T19:54:48,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:54:48,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:54:48,064 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/.tmp/hbase.id]:[hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/hbase.id] 2024-11-14T19:54:48,075 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:48,075 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T19:54:48,077 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
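Editor's note: the cluster ID lines above show the hbase.id file being written to a .tmp location first and then moved to its final path. Writing to a temporary file and publishing it with a rename is a common HDFS idiom for making a small file appear all at once; the sketch below shows that generic pattern with the plain Hadoop FileSystem API under assumed paths and namenode address, and is not the FSUtils implementation itself.

import java.io.IOException;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRename {
    public static void main(String[] args) throws Exception {
        // Namenode address and paths are placeholders for illustration.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), new Configuration());

        Path tmp = new Path("/hbase/.tmp/hbase.id");
        Path target = new Path("/hbase/hbase.id");

        // Write the content (cluster ID value taken from the log above) to a temporary file first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("e671132d-80d8-41ab-944a-acf42a71e4c9".getBytes(StandardCharsets.UTF_8));
        }
        // ...then publish it with a single rename, so readers never observe a half-written file.
        if (!fs.rename(tmp, target)) {
            throw new IOException("rename failed: " + tmp + " -> " + target);
        }
    }
}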
2024-11-14T19:54:48,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:48,087 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:48,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:54:48,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:54:48,094 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T19:54:48,095 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T19:54:48,095 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:54:48,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:54:48,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:54:48,105 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store 2024-11-14T19:54:48,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:54:48,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:54:48,115 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:54:48,115 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:54:48,116 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:54:48,116 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:54:48,116 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:54:48,116 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:54:48,116 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
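Editor's note: the 'master:store' descriptor dumped above lists four column families (info, proc, rs, state) with per-family settings such as versions, bloom filter type, data block encoding, and block size. For orientation only, here is a rough sketch of how a descriptor with a couple of those families could be assembled through the public HBase client builder API; it mirrors a few attribute values from the log but is not the MasterRegion bootstrap code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
    public static TableDescriptor build() {
        // 'info' family: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();

        // 'proc' family: single version, ROW bloom, default block size.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build();

        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
    }
}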
2024-11-14T19:54:48,116 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731614088115Disabling compacts and flushes for region at 1731614088115Disabling writes for close at 1731614088116 (+1 ms)Writing region close event to WAL at 1731614088116Closed at 1731614088116 2024-11-14T19:54:48,117 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/.initializing 2024-11-14T19:54:48,117 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/WALs/867b237d0fa7,39521,1731614087871 2024-11-14T19:54:48,119 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C39521%2C1731614087871, suffix=, logDir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/WALs/867b237d0fa7,39521,1731614087871, archiveDir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/oldWALs, maxLogs=10 2024-11-14T19:54:48,120 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C39521%2C1731614087871.1731614088120 2024-11-14T19:54:48,129 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/WALs/867b237d0fa7,39521,1731614087871/867b237d0fa7%2C39521%2C1731614087871.1731614088120 2024-11-14T19:54:48,129 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45085:45085),(127.0.0.1/127.0.0.1:33739:33739)] 2024-11-14T19:54:48,130 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:54:48,130 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:54:48,130 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:48,130 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:48,132 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:48,133 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T19:54:48,133 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,134 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:48,134 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:48,135 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T19:54:48,135 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,135 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:54:48,136 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:48,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T19:54:48,137 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:54:48,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:48,138 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T19:54:48,138 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,139 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:54:48,139 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:48,140 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:48,140 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:48,141 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:48,141 DEBUG [master/867b237d0fa7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:48,142 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T19:54:48,143 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:54:48,145 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:54:48,146 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860164, jitterRate=0.09375634789466858}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T19:54:48,147 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731614088130Initializing all the Stores at 1731614088131 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614088131Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614088132 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614088132Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614088132Cleaning up temporary data from old regions at 1731614088141 (+9 ms)Region opened successfully at 1731614088147 (+6 ms) 2024-11-14T19:54:48,147 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T19:54:48,151 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c88defa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:54:48,152 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T19:54:48,152 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T19:54:48,152 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T19:54:48,152 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T19:54:48,153 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T19:54:48,153 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T19:54:48,153 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T19:54:48,155 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T19:54:48,156 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T19:54:48,165 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T19:54:48,166 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T19:54:48,167 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T19:54:48,174 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T19:54:48,174 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T19:54:48,175 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T19:54:48,182 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T19:54:48,183 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T19:54:48,190 DEBUG 
[master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T19:54:48,193 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T19:54:48,199 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T19:54:48,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:54:48,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:48,207 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:54:48,207 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:48,208 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=867b237d0fa7,39521,1731614087871, sessionid=0x1013c17e0590000, setting cluster-up flag (Was=false) 2024-11-14T19:54:48,224 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:48,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:48,249 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T19:54:48,250 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,39521,1731614087871 2024-11-14T19:54:48,265 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:48,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:48,290 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T19:54:48,292 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,39521,1731614087871 2024-11-14T19:54:48,294 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T19:54:48,296 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T19:54:48,296 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T19:54:48,296 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T19:54:48,297 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 867b237d0fa7,39521,1731614087871 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T19:54:48,298 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:54:48,298 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:54:48,298 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:54:48,298 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:54:48,298 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/867b237d0fa7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T19:54:48,298 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:48,298 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:54:48,298 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T19:54:48,299 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731614118299 2024-11-14T19:54:48,299 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T19:54:48,300 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T19:54:48,300 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T19:54:48,300 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T19:54:48,300 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T19:54:48,300 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T19:54:48,300 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:48,300 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T19:54:48,300 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:54:48,300 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T19:54:48,300 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T19:54:48,300 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T19:54:48,301 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T19:54:48,301 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T19:54:48,301 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614088301,5,FailOnTimeoutGroup] 2024-11-14T19:54:48,301 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614088301,5,FailOnTimeoutGroup] 2024-11-14T19:54:48,301 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:48,301 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T19:54:48,301 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:48,301 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:48,301 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,302 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T19:54:48,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:54:48,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:54:48,310 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T19:54:48,310 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067 2024-11-14T19:54:48,313 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(746): ClusterId : e671132d-80d8-41ab-944a-acf42a71e4c9 2024-11-14T19:54:48,313 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T19:54:48,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:54:48,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:54:48,317 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:54:48,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:54:48,320 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:54:48,320 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:48,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:54:48,322 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:54:48,322 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:48,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:54:48,324 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:54:48,324 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T19:54:48,324 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T19:54:48,324 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:48,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:54:48,326 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:54:48,326 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,327 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:48,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:54:48,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740 2024-11-14T19:54:48,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740 2024-11-14T19:54:48,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:54:48,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:54:48,330 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
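The FlushLargeStoresPolicy line just above records the fallback taken when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on hbase:meta: the per-family flush lower bound becomes the region memstore flush heap size divided by the number of column families, reported here as 16.0 M across the four families (info, ns, rep_barrier, table), which matches the flushSizeLowerBound=16777216 printed when the region is opened just below. A minimal sketch of that arithmetic, with the 64 MB flush heap size inferred from this log rather than read from any configuration:

    // Illustrative only: reproduces the FlushLargeStoresPolicy fallback above.
    // The 64 MB figure is inferred from the log (16.0 M lower bound x 4 families),
    // not taken from a configuration file.
    public final class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushHeapSize = 64L * 1024 * 1024; // inferred for this run
        int columnFamilies = 4;                         // info, ns, rep_barrier, table
        long lowerBound = memstoreFlushHeapSize / columnFamilies;
        System.out.println(lowerBound); // 16777216, matching flushSizeLowerBound below
      }
    }

Setting the hbase.hregion.percolumnfamilyflush.size.lower.bound key named in the log (in hbase-site.xml or on the table descriptor) would pin the bound explicitly instead of relying on this fallback.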
2024-11-14T19:54:48,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:54:48,333 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T19:54:48,333 DEBUG [RS:0;867b237d0fa7:39535 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e44ccd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:54:48,333 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:54:48,334 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694043, jitterRate=-0.11747930943965912}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:54:48,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731614088318Initializing all the Stores at 1731614088318Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614088318Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614088319 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614088319Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614088319Cleaning up temporary data from old regions at 1731614088329 (+10 ms)Region opened successfully at 1731614088334 (+5 ms) 2024-11-14T19:54:48,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:54:48,335 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:54:48,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:54:48,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
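The "Opened 1588230740" entry above reports SteppingSplitPolicy wrapping ConstantSizeRegionSplitPolicy with desiredMaxFileSize=694043 at jitterRate=-0.11747930943965912, and the TableDescriptorChecker warning further down shows this test running with a region max file size of 786432 bytes. Those numbers are consistent with the split threshold being the configured maximum plus maxFileSize * jitterRate; the sketch below only checks that arithmetic against the two logged openings and is not the split-policy implementation itself.

    // Rough consistency check of the jittered split thresholds reported in this log.
    // 786432 comes from the hbase.hregion.max.filesize warning later in the log;
    // the jitter rates are copied from the two "Opened 1588230740" entries.
    public final class SplitJitterCheck {
      public static void main(String[] args) {
        long maxFileSize = 786_432L;
        double[] jitterRates = {-0.11747930943965912, -0.0074889808893203735};
        for (double jitter : jitterRates) {
          long desired = maxFileSize + (long) (maxFileSize * jitter);
          System.out.println(desired); // prints 694043 and 780543, matching the log
        }
      }
    }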
2024-11-14T19:54:48,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T19:54:48,335 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:54:48,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731614088335Disabling compacts and flushes for region at 1731614088335Disabling writes for close at 1731614088335Writing region close event to WAL at 1731614088335Closed at 1731614088335 2024-11-14T19:54:48,337 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:54:48,337 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T19:54:48,337 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T19:54:48,338 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:54:48,339 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T19:54:48,346 DEBUG [RS:0;867b237d0fa7:39535 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;867b237d0fa7:39535 2024-11-14T19:54:48,346 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T19:54:48,346 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T19:54:48,346 DEBUG [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(832): About to register with Master. 
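The master-side entries above walk meta assignment through nested procedures: pid=1 (InitMetaProcedure) spawns pid=2 (TransitRegionStateProcedure ... ASSIGN), which later spawns pid=3 (OpenRegionProcedure). When reading long test logs like this one it can help to pull the pid/ppid/state fields out mechanically; the regex below is only a log-reading aid under that assumption and has nothing to do with HBase's procedure framework itself.

    // Log-reading aid: extract pid / ppid / state from procedure log fragments such as
    // "pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE".
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public final class ProcedureLineGrep {
      private static final Pattern P =
          Pattern.compile("pid=(\\d+)(?:, ppid=(\\d+))?, state=([A-Z_:]+)");

      public static void main(String[] args) {
        String line = "pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE,"
            + " hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN";
        Matcher m = P.matcher(line);
        while (m.find()) {
          System.out.printf("pid=%s parent=%s state=%s%n", m.group(1), m.group(2), m.group(3));
        }
      }
    }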
2024-11-14T19:54:48,347 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(2659): reportForDuty to master=867b237d0fa7,39521,1731614087871 with port=39535, startcode=1731614087990 2024-11-14T19:54:48,347 DEBUG [RS:0;867b237d0fa7:39535 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T19:54:48,349 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38045, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T19:54:48,350 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39521 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 867b237d0fa7,39535,1731614087990 2024-11-14T19:54:48,350 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39521 {}] master.ServerManager(517): Registering regionserver=867b237d0fa7,39535,1731614087990 2024-11-14T19:54:48,352 DEBUG [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067 2024-11-14T19:54:48,352 DEBUG [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38891 2024-11-14T19:54:48,352 DEBUG [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T19:54:48,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:54:48,363 DEBUG [RS:0;867b237d0fa7:39535 {}] zookeeper.ZKUtil(111): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/867b237d0fa7,39535,1731614087990 2024-11-14T19:54:48,363 WARN [RS:0;867b237d0fa7:39535 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T19:54:48,363 INFO [RS:0;867b237d0fa7:39535 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:54:48,363 DEBUG [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990 2024-11-14T19:54:48,364 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [867b237d0fa7,39535,1731614087990] 2024-11-14T19:54:48,367 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T19:54:48,369 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T19:54:48,369 INFO [RS:0;867b237d0fa7:39535 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T19:54:48,369 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
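The registration above is also visible in ZooKeeper: the region server creates an ephemeral child under /hbase/rs (hence the NodeChildrenChanged event and the RegionServerTracker line), using the quorum 127.0.0.1:49808 and base znode /hbase shown in the watcher entries. A small client that lists those children, assuming the same quorum and znode layout as this test, could look roughly like the sketch below; it uses the plain ZooKeeper client rather than any HBase API.

    // Rough sketch: list region server ephemeral nodes under /hbase/rs.
    // Connect string and znode path are copied from this log; in a real deployment
    // they come from hbase.zookeeper.quorum and zookeeper.znode.parent.
    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher.Event.KeeperState;
    import org.apache.zookeeper.ZooKeeper;

    public final class ListRegionServers {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49808", 30_000, event -> {
          if (event.getState() == KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // Expect entries like 867b237d0fa7,39535,1731614087990 while the RS is alive.
        List<String> servers = zk.getChildren("/hbase/rs", false);
        servers.forEach(System.out::println);
        zk.close();
      }
    }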
2024-11-14T19:54:48,370 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T19:54:48,371 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T19:54:48,371 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:48,371 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:48,371 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:48,371 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:48,371 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:48,371 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:48,371 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:54:48,371 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:48,371 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:48,372 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:48,372 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:48,372 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:48,372 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:54:48,372 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:54:48,372 DEBUG [RS:0;867b237d0fa7:39535 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:54:48,378 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
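The two blocks above show the region server standing up its background machinery: named ScheduledChore tasks with fixed periods (CompactionChecker every 1 s, CompactedHFilesCleaner every 2 min, and so on) and a family of bounded executor pools (RS_OPEN_REGION, RS_CLOSE_META, ...) each with its own core/max pool size. The sketch below mirrors that shape with plain java.util.concurrent primitives; it is an analogy for reading the log, not HBase's ChoreService or ExecutorService implementation.

    // Analogy only: one periodic "chore" plus one bounded worker pool, using the
    // period and pool sizes reported in the log above.
    import java.util.concurrent.Executors;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public final class ChoreAndPoolSketch {
      public static void main(String[] args) throws InterruptedException {
        // "CompactionChecker, period=1000, unit=MILLISECONDS"
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
        chores.scheduleAtFixedRate(
            () -> System.out.println("compaction check"), 0, 1000, TimeUnit.MILLISECONDS);

        // "RS_OPEN_REGION ... corePoolSize=1, maxPoolSize=1"
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.submit(() -> System.out.println("open region task"));

        TimeUnit.SECONDS.sleep(3);
        chores.shutdownNow();
        openRegionPool.shutdown();
      }
    }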
2024-11-14T19:54:48,379 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:48,379 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:48,379 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:48,379 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:48,379 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39535,1731614087990-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:54:48,396 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T19:54:48,397 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39535,1731614087990-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:48,397 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:48,397 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.Replication(171): 867b237d0fa7,39535,1731614087990 started 2024-11-14T19:54:48,413 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:48,413 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(1482): Serving as 867b237d0fa7,39535,1731614087990, RpcServer on 867b237d0fa7/172.17.0.2:39535, sessionid=0x1013c17e0590001 2024-11-14T19:54:48,413 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T19:54:48,413 DEBUG [RS:0;867b237d0fa7:39535 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 867b237d0fa7,39535,1731614087990 2024-11-14T19:54:48,414 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,39535,1731614087990' 2024-11-14T19:54:48,414 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T19:54:48,414 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T19:54:48,415 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T19:54:48,415 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T19:54:48,415 DEBUG [RS:0;867b237d0fa7:39535 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 867b237d0fa7,39535,1731614087990 2024-11-14T19:54:48,415 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,39535,1731614087990' 2024-11-14T19:54:48,415 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T19:54:48,415 DEBUG 
[RS:0;867b237d0fa7:39535 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T19:54:48,415 DEBUG [RS:0;867b237d0fa7:39535 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T19:54:48,415 INFO [RS:0;867b237d0fa7:39535 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T19:54:48,415 INFO [RS:0;867b237d0fa7:39535 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T19:54:48,490 WARN [867b237d0fa7:39521 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T19:54:48,518 INFO [RS:0;867b237d0fa7:39535 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C39535%2C1731614087990, suffix=, logDir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990, archiveDir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/oldWALs, maxLogs=32 2024-11-14T19:54:48,519 INFO [RS:0;867b237d0fa7:39535 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C39535%2C1731614087990.1731614088518 2024-11-14T19:54:48,525 INFO [RS:0;867b237d0fa7:39535 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614088518 2024-11-14T19:54:48,526 DEBUG [RS:0;867b237d0fa7:39535 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45085:45085),(127.0.0.1/127.0.0.1:33739:33739)] 2024-11-14T19:54:48,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:48,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:54:48,740 DEBUG [867b237d0fa7:39521 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T19:54:48,741 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=867b237d0fa7,39535,1731614087990 2024-11-14T19:54:48,742 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,39535,1731614087990, state=OPENING 2024-11-14T19:54:48,770 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T19:54:48,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:48,779 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:54:48,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:54:48,780 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:54:48,780 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=867b237d0fa7,39535,1731614087990}] 2024-11-14T19:54:48,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:54:48,935 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T19:54:48,937 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49943, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T19:54:48,941 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T19:54:48,941 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:54:48,943 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C39535%2C1731614087990.meta, suffix=.meta, logDir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990, archiveDir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/oldWALs, maxLogs=32 2024-11-14T19:54:48,943 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C39535%2C1731614087990.meta.1731614088943.meta 2024-11-14T19:54:48,949 INFO 
[RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.meta.1731614088943.meta 2024-11-14T19:54:48,955 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33739:33739),(127.0.0.1/127.0.0.1:45085:45085)] 2024-11-14T19:54:48,957 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:54:48,957 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T19:54:48,957 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T19:54:48,958 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T19:54:48,958 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T19:54:48,958 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:54:48,958 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T19:54:48,958 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T19:54:48,963 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:54:48,964 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:54:48,964 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:48,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:54:48,967 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:54:48,967 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:48,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:54:48,968 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:54:48,968 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,969 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:48,969 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:54:48,970 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:54:48,970 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:48,970 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:54:48,971 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:54:48,972 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740 2024-11-14T19:54:48,973 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740 2024-11-14T19:54:48,974 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:54:48,974 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:54:48,975 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
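Both store-opening passes print the same CompactionConfiguration summary: minCompactSize 128 MB, files [minFilesToCompact:3, maxFilesToCompact:10), ratio 1.2 (5.0 off-peak), major period 604800000 ms with 0.5 jitter. Those values line up with HBase's stock compaction defaults, so a test or site configuration that wanted to change them would most likely touch keys such as the ones below; the key-to-field mapping is an assumption based on the usual documentation, not something this log states.

    // Hedged sketch: probable configuration keys behind the CompactionConfiguration
    // summary above (key names are assumptions; the values are the ones printed here).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio 5.000000
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);  // major period (7 days)
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter 0.500000
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }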
2024-11-14T19:54:48,977 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:54:48,978 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780543, jitterRate=-0.0074889808893203735}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:54:48,978 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T19:54:48,979 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731614088958Writing region info on filesystem at 1731614088958Initializing all the Stores at 1731614088959 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614088959Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614088962 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614088962Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614088962Cleaning up temporary data from old regions at 1731614088974 (+12 ms)Running coprocessor post-open hooks at 1731614088978 (+4 ms)Region opened successfully at 1731614088978 2024-11-14T19:54:48,981 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731614088934 2024-11-14T19:54:48,984 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T19:54:48,984 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T19:54:48,985 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=867b237d0fa7,39535,1731614087990 2024-11-14T19:54:48,986 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,39535,1731614087990, state=OPEN 2024-11-14T19:54:49,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:54:49,018 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:54:49,018 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=867b237d0fa7,39535,1731614087990 2024-11-14T19:54:49,018 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:54:49,018 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:54:49,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T19:54:49,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=867b237d0fa7,39535,1731614087990 in 238 msec 2024-11-14T19:54:49,024 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T19:54:49,024 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 684 msec 2024-11-14T19:54:49,025 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:54:49,025 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T19:54:49,026 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:54:49,027 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,39535,1731614087990, seqNum=-1] 2024-11-14T19:54:49,027 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:54:49,028 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41975, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:54:49,035 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 739 msec 2024-11-14T19:54:49,035 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731614089035, completionTime=-1 2024-11-14T19:54:49,035 INFO 
[master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T19:54:49,036 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T19:54:49,037 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T19:54:49,038 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731614149037 2024-11-14T19:54:49,038 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731614209038 2024-11-14T19:54:49,038 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-14T19:54:49,038 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39521,1731614087871-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:49,038 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39521,1731614087871-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:49,038 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39521,1731614087871-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:49,039 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-867b237d0fa7:39521, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:49,039 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:49,039 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:49,041 DEBUG [master/867b237d0fa7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T19:54:49,044 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.995sec 2024-11-14T19:54:49,044 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T19:54:49,044 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T19:54:49,044 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T19:54:49,044 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-14T19:54:49,044 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T19:54:49,044 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39521,1731614087871-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:54:49,044 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39521,1731614087871-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T19:54:49,047 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T19:54:49,047 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T19:54:49,047 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,39521,1731614087871-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:54:49,114 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19b29235, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:54:49,114 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 867b237d0fa7,39521,-1 for getting cluster id 2024-11-14T19:54:49,114 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T19:54:49,117 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e671132d-80d8-41ab-944a-acf42a71e4c9' 2024-11-14T19:54:49,118 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T19:54:49,118 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e671132d-80d8-41ab-944a-acf42a71e4c9" 2024-11-14T19:54:49,118 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2020ad33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:54:49,118 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [867b237d0fa7,39521,-1] 2024-11-14T19:54:49,119 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T19:54:49,119 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:54:49,120 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36940, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T19:54:49,121 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3564df9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:54:49,122 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:54:49,123 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,39535,1731614087990, seqNum=-1] 2024-11-14T19:54:49,123 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:54:49,125 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53378, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:54:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=867b237d0fa7,39521,1731614087871 2024-11-14T19:54:49,127 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:54:49,131 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T19:54:49,132 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T19:54:49,133 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 867b237d0fa7,39521,1731614087871 2024-11-14T19:54:49,133 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@305a5bfe 2024-11-14T19:54:49,133 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T19:54:49,135 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36956, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T19:54:49,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T19:54:49,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-14T19:54:49,136 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T19:54:49,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T19:54:49,140 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T19:54:49,140 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:49,140 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-14T19:54:49,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T19:54:49,141 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T19:54:49,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741835_1011 (size=405) 2024-11-14T19:54:49,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741835_1011 (size=405) 2024-11-14T19:54:49,155 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 833e1474d6e88b3e653a72245f81c56f, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067 2024-11-14T19:54:49,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741836_1012 (size=88) 2024-11-14T19:54:49,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45499 is added to blk_1073741836_1012 (size=88) 2024-11-14T19:54:49,162 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:54:49,163 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 833e1474d6e88b3e653a72245f81c56f, disabling compactions & flushes 2024-11-14T19:54:49,163 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 2024-11-14T19:54:49,163 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 2024-11-14T19:54:49,163 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. after waiting 0 ms 2024-11-14T19:54:49,163 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 2024-11-14T19:54:49,163 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 2024-11-14T19:54:49,163 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 833e1474d6e88b3e653a72245f81c56f: Waiting for close lock at 1731614089163Disabling compacts and flushes for region at 1731614089163Disabling writes for close at 1731614089163Writing region close event to WAL at 1731614089163Closed at 1731614089163 2024-11-14T19:54:49,164 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T19:54:49,165 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731614089164"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731614089164"}]},"ts":"1731614089164"} 2024-11-14T19:54:49,168 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-14T19:54:49,169 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T19:54:49,170 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731614089169"}]},"ts":"1731614089169"} 2024-11-14T19:54:49,172 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-14T19:54:49,173 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=833e1474d6e88b3e653a72245f81c56f, ASSIGN}] 2024-11-14T19:54:49,174 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=833e1474d6e88b3e653a72245f81c56f, ASSIGN 2024-11-14T19:54:49,176 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=833e1474d6e88b3e653a72245f81c56f, ASSIGN; state=OFFLINE, location=867b237d0fa7,39535,1731614087990; forceNewPlan=false, retain=false 2024-11-14T19:54:49,327 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=833e1474d6e88b3e653a72245f81c56f, regionState=OPENING, regionLocation=867b237d0fa7,39535,1731614087990 2024-11-14T19:54:49,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=833e1474d6e88b3e653a72245f81c56f, ASSIGN because future has completed 2024-11-14T19:54:49,332 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 833e1474d6e88b3e653a72245f81c56f, server=867b237d0fa7,39535,1731614087990}] 2024-11-14T19:54:49,490 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 
2024-11-14T19:54:49,490 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 833e1474d6e88b3e653a72245f81c56f, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:54:49,491 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:54:49,491 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:54:49,491 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:54:49,491 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:54:49,492 INFO [StoreOpener-833e1474d6e88b3e653a72245f81c56f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:54:49,494 INFO [StoreOpener-833e1474d6e88b3e653a72245f81c56f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 833e1474d6e88b3e653a72245f81c56f columnFamilyName info 2024-11-14T19:54:49,494 DEBUG [StoreOpener-833e1474d6e88b3e653a72245f81c56f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:54:49,494 INFO [StoreOpener-833e1474d6e88b3e653a72245f81c56f-1 {}] regionserver.HStore(327): Store=833e1474d6e88b3e653a72245f81c56f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:54:49,495 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:54:49,495 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:54:49,496 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:54:49,496 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:54:49,496 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:54:49,498 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:54:49,500 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:54:49,501 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 833e1474d6e88b3e653a72245f81c56f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792537, jitterRate=0.007763579487800598}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T19:54:49,501 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:54:49,501 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 833e1474d6e88b3e653a72245f81c56f: Running coprocessor pre-open hook at 1731614089491Writing region info on filesystem at 1731614089491Initializing all the Stores at 1731614089492 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614089492Cleaning up temporary data from old regions at 1731614089496 (+4 ms)Running coprocessor post-open hooks at 1731614089501 (+5 ms)Region opened successfully at 1731614089501 2024-11-14T19:54:49,503 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f., pid=6, masterSystemTime=1731614089485 2024-11-14T19:54:49,505 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 2024-11-14T19:54:49,506 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 2024-11-14T19:54:49,506 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=833e1474d6e88b3e653a72245f81c56f, regionState=OPEN, openSeqNum=2, regionLocation=867b237d0fa7,39535,1731614087990 2024-11-14T19:54:49,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 833e1474d6e88b3e653a72245f81c56f, server=867b237d0fa7,39535,1731614087990 because future has completed 2024-11-14T19:54:49,513 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T19:54:49,513 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 833e1474d6e88b3e653a72245f81c56f, server=867b237d0fa7,39535,1731614087990 in 178 msec 2024-11-14T19:54:49,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T19:54:49,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=833e1474d6e88b3e653a72245f81c56f, ASSIGN in 340 msec 2024-11-14T19:54:49,517 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T19:54:49,517 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731614089517"}]},"ts":"1731614089517"} 2024-11-14T19:54:49,519 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-14T19:54:49,521 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T19:54:49,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 385 msec 2024-11-14T19:54:49,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:49,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:50,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:50,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:50,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T19:54:50,669 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T19:54:50,670 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:54:50,670 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T19:54:50,670 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T19:54:50,670 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T19:54:51,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:51,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:52,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:52,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:53,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:53,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:53,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:53,995 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,500 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T19:54:54,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,522 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,526 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:54:54,531 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T19:54:54,531 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-14T19:54:54,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:54,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:55,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:55,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:56,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:56,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:57,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:57,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:58,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:58,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:54:59,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T19:54:59,237 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T19:54:59,237 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-14T19:54:59,241 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T19:54:59,241 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 
2024-11-14T19:54:59,245 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f., hostname=867b237d0fa7,39535,1731614087990, seqNum=2]
2024-11-14T19:54:59,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-14T19:54:59,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-14T19:54:59,260 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-14T19:54:59,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-14T19:54:59,261 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-14T19:54:59,263 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-14T19:54:59,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39535 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-14T19:54:59,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.
2024-11-14T19:54:59,423 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 833e1474d6e88b3e653a72245f81c56f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-14T19:54:59,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/22dcab8eede54ffa9bb18151d24a64c7 is 1080, key is row0001/info:/1731614099247/Put/seqid=0
2024-11-14T19:54:59,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741837_1013 (size=6033)
2024-11-14T19:54:59,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741837_1013 (size=6033)
2024-11-14T19:54:59,452 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/22dcab8eede54ffa9bb18151d24a64c7
2024-11-14T19:54:59,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/22dcab8eede54ffa9bb18151d24a64c7 as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/22dcab8eede54ffa9bb18151d24a64c7
2024-11-14T19:54:59,465 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/22dcab8eede54ffa9bb18151d24a64c7, entries=1, sequenceid=5, filesize=5.9 K
2024-11-14T19:54:59,466 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 833e1474d6e88b3e653a72245f81c56f in 43ms, sequenceid=5, compaction requested=false
2024-11-14T19:54:59,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 833e1474d6e88b3e653a72245f81c56f:
2024-11-14T19:54:59,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.
2024-11-14T19:54:59,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-14T19:54:59,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-14T19:54:59,474 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-14T19:54:59,474 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 208 msec 2024-11-14T19:54:59,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 220 msec 2024-11-14T19:54:59,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:54:59,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:00,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:00,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:01,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:01,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:02,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:02,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:03,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:03,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:04,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:04,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:05,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:05,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:06,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:06,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:07,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:07,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:08,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:08,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-14T19:55:09,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-14T19:55:09,327 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-14T19:55:09,333 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-14T19:55:09,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-14T19:55:09,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-14T19:55:09,337 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-14T19:55:09,338 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-14T19:55:09,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-14T19:55:09,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39535 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-14T19:55:09,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.
2024-11-14T19:55:09,494 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 833e1474d6e88b3e653a72245f81c56f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-14T19:55:09,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/f5ab766cf27d43c2b6bab922293c28da is 1080, key is row0002/info:/1731614109329/Put/seqid=0
2024-11-14T19:55:09,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741838_1014 (size=6033)
2024-11-14T19:55:09,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741838_1014 (size=6033)
2024-11-14T19:55:09,512 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/f5ab766cf27d43c2b6bab922293c28da
2024-11-14T19:55:09,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/f5ab766cf27d43c2b6bab922293c28da as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/f5ab766cf27d43c2b6bab922293c28da
2024-11-14T19:55:09,525 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/f5ab766cf27d43c2b6bab922293c28da, entries=1, sequenceid=9, filesize=5.9 K
2024-11-14T19:55:09,527 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 833e1474d6e88b3e653a72245f81c56f in 33ms, sequenceid=9, compaction requested=false
2024-11-14T19:55:09,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 833e1474d6e88b3e653a72245f81c56f:
2024-11-14T19:55:09,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.
2024-11-14T19:55:09,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-14T19:55:09,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-14T19:55:09,532 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-14T19:55:09,532 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 191 msec
2024-11-14T19:55:09,535 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 200 msec
2024-11-14T19:55:09,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-14T19:55:09,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:10,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:10,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:11,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:11,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:12,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-14T19:55:12,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-14T19:55:12,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 after 68059ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-14T19:55:12,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta after 68052ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-14T19:55:13,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:13,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:14,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:14,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:15,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:15,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:16,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:16,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:17,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:17,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:17,853 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T19:55:18,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:18,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:19,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-14T19:55:19,427 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T19:55:19,430 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C39535%2C1731614087990.1731614119430 2024-11-14T19:55:19,437 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:19,437 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:19,437 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:19,438 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:19,438 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:19,438 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614088518 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614119430 2024-11-14T19:55:19,439 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45085:45085),(127.0.0.1/127.0.0.1:33739:33739)] 2024-11-14T19:55:19,439 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614088518 is not closed yet, will try archiving it next time 2024-11-14T19:55:19,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741833_1009 (size=5546) 2024-11-14T19:55:19,440 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T19:55:19,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741833_1009 (size=5546) 2024-11-14T19:55:19,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T19:55:19,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-14T19:55:19,443 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T19:55:19,445 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T19:55:19,445 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T19:55:19,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39535 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-14T19:55:19,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 2024-11-14T19:55:19,600 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 833e1474d6e88b3e653a72245f81c56f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T19:55:19,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/3a9a2ec9756c41daa5fd9f958b6353b0 is 1080, key is row0003/info:/1731614119428/Put/seqid=0 2024-11-14T19:55:19,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:19,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:19,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741840_1016 (size=6033) 2024-11-14T19:55:19,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741840_1016 (size=6033) 2024-11-14T19:55:19,616 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/3a9a2ec9756c41daa5fd9f958b6353b0 2024-11-14T19:55:19,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/3a9a2ec9756c41daa5fd9f958b6353b0 as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/3a9a2ec9756c41daa5fd9f958b6353b0 2024-11-14T19:55:19,630 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/3a9a2ec9756c41daa5fd9f958b6353b0, entries=1, sequenceid=13, filesize=5.9 K 2024-11-14T19:55:19,631 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 833e1474d6e88b3e653a72245f81c56f in 31ms, sequenceid=13, compaction requested=true 2024-11-14T19:55:19,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 833e1474d6e88b3e653a72245f81c56f: 2024-11-14T19:55:19,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 
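For reference, the flush recorded above (Client=jenkins requesting FLUSH of TestLogRolling-testCompactionRecordDoesntBlockRolling, executed as FlushTableProcedure pid=11 fanning out FlushRegionProcedure pid=12, which wrote the row0003/info: cell into store file 3a9a2ec9756c41daa5fd9f958b6353b0) corresponds to an ordinary client-side write followed by a table flush. A minimal sketch of such a request is shown below, assuming a standard HBase client configuration on the classpath; the table name, column family, and row key are taken from the log, while the connection setup, qualifier, and cell value are illustrative assumptions rather than the test's actual code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    // Table name as it appears in the log records above.
    TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    // Connection settings are read from hbase-site.xml on the classpath (assumption).
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      // Write one cell into the 'info' family, mirroring the row0003/info: key seen above.
      Put put = new Put(Bytes.toBytes("row0003"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value"));
      table.put(put);
      // Ask the master to flush the table; this schedules a FlushTableProcedure,
      // which in turn dispatches FlushRegionProcedure work to the region servers,
      // as reflected in the pid=11 / pid=12 entries in the log.
      admin.flush(name);
    }
  }
}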
2024-11-14T19:55:19,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-14T19:55:19,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-14T19:55:19,636 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-14T19:55:19,636 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 188 msec 2024-11-14T19:55:19,638 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 197 msec 2024-11-14T19:55:20,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:20,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:21,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:21,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:22,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:22,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:23,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:23,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:24,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:24,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:25,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:25,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:26,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-14T19:55:26,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-14T19:55:27,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:27,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:28,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:28,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
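
These warnings all come from the same retry loop: as part of closing a WAL writer, HBase's lease-recovery helper asks HDFS to recover the file's lease and then polls isFileClosed() through reflection until HDFS reports the file closed; because the DFSClient behind these two WAL paths has already been closed, every poll fails with "Filesystem closed" and the Close-WAL-Writer thread retries about once per second, which is what produces the repeated entries above and below. A minimal sketch of that recover-then-poll pattern, assuming a DistributedFileSystem handle and an illustrative timeout (this is not the actual RecoverLeaseFSUtils code), could look like:

    // Illustrative sketch only -- not the actual RecoverLeaseFSUtils implementation.
    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      /** Ask the NameNode to recover the lease, then poll isFileClosed() until it reports true. */
      static boolean recoverLease(FileSystem fs, Path wal, long timeoutMs) throws IOException {
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // non-HDFS filesystems have no lease to recover
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        boolean recovered = dfs.recoverLease(wal);
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!recovered && System.currentTimeMillis() < deadline) {
          try {
            // HBase calls isFileClosed reflectively so it can run against Hadoop versions
            // that may not expose the method; the reflective call is what surfaces as
            // InvocationTargetException in the warnings above.
            Method isFileClosed = dfs.getClass().getMethod("isFileClosed", Path.class);
            recovered = (Boolean) isFileClosed.invoke(dfs, wal);
          } catch (NoSuchMethodException | IllegalAccessException e) {
            break; // method unavailable: give up on the polling shortcut
          } catch (InvocationTargetException e) {
            // If the DFSClient is already closed, the cause is "Filesystem closed";
            // the real helper logs a WARN and keeps retrying until its own timeout.
          }
          try {
            Thread.sleep(1000); // retry roughly once per second, as seen in this log
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            break;
          }
        }
        return recovered;
      }
    }
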
2024-11-14T19:55:29,053 INFO [master/867b237d0fa7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-14T19:55:29,054 INFO [master/867b237d0fa7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-14T19:55:29,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-14T19:55:29,497 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-14T19:55:29,498 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-14T19:55:29,501 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-14T19:55:29,501 DEBUG [Time-limited test {}] regionserver.HStore(1541): 833e1474d6e88b3e653a72245f81c56f/info is initiating minor compaction (all files)
2024-11-14T19:55:29,501 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-14T19:55:29,501 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-14T19:55:29,502 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 833e1474d6e88b3e653a72245f81c56f/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.
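
The ExploringCompactionPolicy entry just above reports that a single candidate set holding all three store files (18099 bytes in total) passed the ratio test, so a minor compaction of all files is started. As a rough, hypothetical illustration of that kind of ratio check (not the actual HBase policy code; the 1.2 ratio and the helper names are assumptions), a candidate set qualifies when no file is larger than the combined size of the others multiplied by the ratio:

    // Hypothetical helper illustrating a ratio-based selection test similar in spirit
    // to HBase's exploring/ratio compaction policies; names and the 1.2 ratio are
    // assumptions for this sketch, not values taken from the test run.
    import java.util.List;

    final class RatioSelectionSketch {
      /** True when every file is at most (sum of the other files) * ratio. */
      static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // The three flushed files in this run are roughly 6 KB each (18099 bytes total),
        // so they easily satisfy the ratio check and the whole set is selected.
        System.out.println(withinRatio(List.of(6033L, 6033L, 6033L), 1.2d));
      }
    }
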
2024-11-14T19:55:29,502 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/22dcab8eede54ffa9bb18151d24a64c7, hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/f5ab766cf27d43c2b6bab922293c28da, hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/3a9a2ec9756c41daa5fd9f958b6353b0] into tmpdir=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp, totalSize=17.7 K
2024-11-14T19:55:29,503 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 22dcab8eede54ffa9bb18151d24a64c7, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731614099247
2024-11-14T19:55:29,504 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f5ab766cf27d43c2b6bab922293c28da, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731614109329
2024-11-14T19:55:29,505 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3a9a2ec9756c41daa5fd9f958b6353b0, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731614119428
2024-11-14T19:55:29,518 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 833e1474d6e88b3e653a72245f81c56f#info#compaction#44 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-14T19:55:29,518 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/61586206ff3745f78a742f3317ba07c9 is 1080, key is row0001/info:/1731614099247/Put/seqid=0
2024-11-14T19:55:29,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741841_1017 (size=8296)
2024-11-14T19:55:29,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741841_1017 (size=8296)
2024-11-14T19:55:29,533 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/61586206ff3745f78a742f3317ba07c9 as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/61586206ff3745f78a742f3317ba07c9
2024-11-14T19:55:29,540 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 833e1474d6e88b3e653a72245f81c56f/info of 833e1474d6e88b3e653a72245f81c56f into 61586206ff3745f78a742f3317ba07c9(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-14T19:55:29,540 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 833e1474d6e88b3e653a72245f81c56f:
2024-11-14T19:55:29,543 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C39535%2C1731614087990.1731614129542
2024-11-14T19:55:29,552 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:55:29,552 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:55:29,552 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:55:29,552 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:55:29,552 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T19:55:29,552 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614119430 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614129542
2024-11-14T19:55:29,553 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45085:45085),(127.0.0.1/127.0.0.1:33739:33739)]
2024-11-14T19:55:29,553 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614119430 is not closed yet, will try archiving it next time
2024-11-14T19:55:29,553 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614088518 to hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/oldWALs/867b237d0fa7%2C39535%2C1731614087990.1731614088518
2024-11-14T19:55:29,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741839_1015 (size=2520)
2024-11-14T19:55:29,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741839_1015 (size=2520)
2024-11-14T19:55:29,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-14T19:55:29,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-14T19:55:29,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-14T19:55:29,557 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-14T19:55:29,558 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-14T19:55:29,558 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
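
The HMaster$22 entry above shows the flush being requested remotely (Client=jenkins//172.17.0.2), after which the master stores FlushTableProcedure pid=13 and fans out a FlushRegionProcedure subtask. A minimal client-side sketch that would trigger the same kind of flush through the public Admin API (the connection settings here are placeholders, not values taken from this run) is:

    // Hypothetical client snippet: trigger a synchronous flush of the test table.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the flush request completes; in the log this surfaces as the
          // master repeatedly "Checking to see if procedure is done pid=13".
          admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
        }
      }
    }
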
2024-11-14T19:55:29,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:29,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:29,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39535 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-14T19:55:29,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.
2024-11-14T19:55:29,714 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 833e1474d6e88b3e653a72245f81c56f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-14T19:55:29,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/0477b0c9570448ba8d65a082ada334a0 is 1080, key is row0000/info:/1731614129541/Put/seqid=0
2024-11-14T19:55:29,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741843_1019 (size=6033)
2024-11-14T19:55:29,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741843_1019 (size=6033)
2024-11-14T19:55:29,726 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/0477b0c9570448ba8d65a082ada334a0
2024-11-14T19:55:29,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/0477b0c9570448ba8d65a082ada334a0 as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/0477b0c9570448ba8d65a082ada334a0
2024-11-14T19:55:29,739 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/0477b0c9570448ba8d65a082ada334a0, entries=1, sequenceid=18, filesize=5.9 K
2024-11-14T19:55:29,740 INFO [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 833e1474d6e88b3e653a72245f81c56f in 27ms, sequenceid=18, compaction requested=false
2024-11-14T19:55:29,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 833e1474d6e88b3e653a72245f81c56f:
2024-11-14T19:55:29,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.
2024-11-14T19:55:29,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-14T19:55:29,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-14T19:55:29,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-14T19:55:29,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec
2024-11-14T19:55:29,748 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 192 msec
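
In the flush above, the region writes the 1.05 KB memstore snapshot to a temporary HFile under .tmp and then commits it into the info family directory ("Committing ... as ..."), which on a single HDFS filesystem amounts to a rename rather than a data copy. A stripped-down sketch of that commit step (abbreviated paths, hypothetical helper; this is not the HRegionFileSystem implementation) might be:

    // Hypothetical sketch of committing a flushed temp HFile by renaming it into the
    // column-family directory; validation and error handling are omitted, and the
    // paths are abbreviated stand-ins for the ones shown in the log.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class CommitStoreFileSketch {
      static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        Path dest = new Path(familyDir, tmpFile.getName());
        // A same-filesystem rename is the cheap "commit": the data blocks do not move.
        if (!fs.rename(tmpFile, dest)) {
          throw new IOException("Failed to commit " + tmpFile + " as " + dest);
        }
        return dest;
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/hbase/data/default/TestTable/region/.tmp/info/0477b0c9570448ba8d65a082ada334a0");
        Path family = new Path("/hbase/data/default/TestTable/region/info");
        System.out.println("Committed to " + commit(fs, tmp, family));
      }
    }
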
2024-11-14T19:55:30,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:30,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:31,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:31,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:32,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:32,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:33,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:33,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:34,491 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 833e1474d6e88b3e653a72245f81c56f, had cached 0 bytes from a total of 14329
2024-11-14T19:55:34,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:34,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:35,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:35,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:36,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:36,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:37,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:37,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:38,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
2024-11-14T19:55:38,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null [stack trace identical to the 19:55:26,617 entry above; root cause: java.io.IOException: Filesystem closed]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:39,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39521 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-14T19:55:39,567 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T19:55:39,570 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C39535%2C1731614087990.1731614139570 2024-11-14T19:55:39,578 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,578 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,578 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,578 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,578 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,579 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614129542 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614139570 2024-11-14T19:55:39,580 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33739:33739),(127.0.0.1/127.0.0.1:45085:45085)] 2024-11-14T19:55:39,580 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614129542 is not closed yet, will 
try archiving it next time 2024-11-14T19:55:39,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T19:55:39,580 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614119430 to hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/oldWALs/867b237d0fa7%2C39535%2C1731614087990.1731614119430 2024-11-14T19:55:39,580 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T19:55:39,580 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:55:39,580 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:55:39,581 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:55:39,581 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T19:55:39,581 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T19:55:39,581 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1598465195, stopped=false 2024-11-14T19:55:39,581 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=867b237d0fa7,39521,1731614087871 2024-11-14T19:55:39,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741842_1018 (size=2026) 2024-11-14T19:55:39,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741842_1018 (size=2026) 2024-11-14T19:55:39,592 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T19:55:39,593 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T19:55:39,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:39,593 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:39,593 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T19:55:39,593 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T19:55:39,593 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:55:39,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:55:39,593 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:55:39,593 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:55:39,593 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '867b237d0fa7,39535,1731614087990' ***** 2024-11-14T19:55:39,593 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T19:55:39,594 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T19:55:39,594 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T19:55:39,594 INFO [RS:0;867b237d0fa7:39535 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T19:55:39,594 INFO [RS:0;867b237d0fa7:39535 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T19:55:39,594 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(3091): Received CLOSE for 833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:55:39,594 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(959): stopping server 867b237d0fa7,39535,1731614087990 2024-11-14T19:55:39,594 INFO [RS:0;867b237d0fa7:39535 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T19:55:39,594 INFO [RS:0;867b237d0fa7:39535 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;867b237d0fa7:39535. 2024-11-14T19:55:39,594 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 833e1474d6e88b3e653a72245f81c56f, disabling compactions & flushes 2024-11-14T19:55:39,594 DEBUG [RS:0;867b237d0fa7:39535 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:55:39,595 DEBUG [RS:0;867b237d0fa7:39535 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:55:39,595 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 2024-11-14T19:55:39,595 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 2024-11-14T19:55:39,595 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T19:55:39,595 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. after waiting 0 ms 2024-11-14T19:55:39,595 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T19:55:39,595 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 2024-11-14T19:55:39,595 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T19:55:39,595 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T19:55:39,595 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 833e1474d6e88b3e653a72245f81c56f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T19:55:39,595 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T19:55:39,595 DEBUG [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(1325): Online Regions={833e1474d6e88b3e653a72245f81c56f=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f., 1588230740=hbase:meta,,1.1588230740} 2024-11-14T19:55:39,595 DEBUG [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 833e1474d6e88b3e653a72245f81c56f 2024-11-14T19:55:39,595 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:55:39,595 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:55:39,595 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:55:39,595 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T19:55:39,595 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T19:55:39,595 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-14T19:55:39,599 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/cdb8e920dc7c4cc999578df80ce631f0 is 1080, key is row0001/info:/1731614139568/Put/seqid=0 2024-11-14T19:55:39,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741845_1021 (size=6033) 2024-11-14T19:55:39,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741845_1021 (size=6033) 2024-11-14T19:55:39,609 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/cdb8e920dc7c4cc999578df80ce631f0 2024-11-14T19:55:39,615 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/.tmp/info/61f4ad398d884b93ac40c5eb11cf14de is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f./info:regioninfo/1731614089506/Put/seqid=0 2024-11-14T19:55:39,616 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/.tmp/info/cdb8e920dc7c4cc999578df80ce631f0 as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/cdb8e920dc7c4cc999578df80ce631f0 2024-11-14T19:55:39,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741846_1022 (size=7308) 2024-11-14T19:55:39,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741846_1022 (size=7308) 2024-11-14T19:55:39,622 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/.tmp/info/61f4ad398d884b93ac40c5eb11cf14de 2024-11-14T19:55:39,622 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/cdb8e920dc7c4cc999578df80ce631f0, entries=1, sequenceid=22, filesize=5.9 K 2024-11-14T19:55:39,624 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize 
~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 833e1474d6e88b3e653a72245f81c56f in 28ms, sequenceid=22, compaction requested=true 2024-11-14T19:55:39,624 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/22dcab8eede54ffa9bb18151d24a64c7, hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/f5ab766cf27d43c2b6bab922293c28da, hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/3a9a2ec9756c41daa5fd9f958b6353b0] to archive 2024-11-14T19:55:39,625 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T19:55:39,627 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/22dcab8eede54ffa9bb18151d24a64c7 to hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/22dcab8eede54ffa9bb18151d24a64c7 2024-11-14T19:55:39,628 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/f5ab766cf27d43c2b6bab922293c28da to hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/f5ab766cf27d43c2b6bab922293c28da 2024-11-14T19:55:39,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:39,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:39,629 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/3a9a2ec9756c41daa5fd9f958b6353b0 to hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/info/3a9a2ec9756c41daa5fd9f958b6353b0 2024-11-14T19:55:39,630 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=867b237d0fa7:39521 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-14T19:55:39,630 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [22dcab8eede54ffa9bb18151d24a64c7=6033, f5ab766cf27d43c2b6bab922293c28da=6033, 3a9a2ec9756c41daa5fd9f958b6353b0=6033] 2024-11-14T19:55:39,634 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/833e1474d6e88b3e653a72245f81c56f/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-14T19:55:39,635 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 2024-11-14T19:55:39,635 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 833e1474d6e88b3e653a72245f81c56f: Waiting for close lock at 1731614139594Running coprocessor pre-close hooks at 1731614139594Disabling compacts and flushes for region at 1731614139594Disabling writes for close at 1731614139595 (+1 ms)Obtaining lock to block concurrent updates at 1731614139595Preparing flush snapshotting stores in 833e1474d6e88b3e653a72245f81c56f at 1731614139595Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731614139595Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. at 1731614139596 (+1 ms)Flushing 833e1474d6e88b3e653a72245f81c56f/info: creating writer at 1731614139596Flushing 833e1474d6e88b3e653a72245f81c56f/info: appending metadata at 1731614139599 (+3 ms)Flushing 833e1474d6e88b3e653a72245f81c56f/info: closing flushed file at 1731614139599Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3096c2cd: reopening flushed file at 1731614139615 (+16 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 833e1474d6e88b3e653a72245f81c56f in 28ms, sequenceid=22, compaction requested=true at 1731614139624 (+9 ms)Writing region close event to WAL at 1731614139630 (+6 ms)Running coprocessor post-close hooks at 1731614139635 (+5 ms)Closed at 1731614139635 2024-11-14T19:55:39,635 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731614089135.833e1474d6e88b3e653a72245f81c56f. 
2024-11-14T19:55:39,642 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/.tmp/ns/2c268262b7194267804826237da36e12 is 43, key is default/ns:d/1731614089029/Put/seqid=0 2024-11-14T19:55:39,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741847_1023 (size=5153) 2024-11-14T19:55:39,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741847_1023 (size=5153) 2024-11-14T19:55:39,647 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/.tmp/ns/2c268262b7194267804826237da36e12 2024-11-14T19:55:39,666 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/.tmp/table/b304896b9e974e4c8a69615910c9c6f0 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731614089517/Put/seqid=0 2024-11-14T19:55:39,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741848_1024 (size=5508) 2024-11-14T19:55:39,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741848_1024 (size=5508) 2024-11-14T19:55:39,671 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/.tmp/table/b304896b9e974e4c8a69615910c9c6f0 2024-11-14T19:55:39,679 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/.tmp/info/61f4ad398d884b93ac40c5eb11cf14de as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/info/61f4ad398d884b93ac40c5eb11cf14de 2024-11-14T19:55:39,685 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/info/61f4ad398d884b93ac40c5eb11cf14de, entries=10, sequenceid=11, filesize=7.1 K 2024-11-14T19:55:39,687 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/.tmp/ns/2c268262b7194267804826237da36e12 as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/ns/2c268262b7194267804826237da36e12 2024-11-14T19:55:39,693 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/ns/2c268262b7194267804826237da36e12, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T19:55:39,694 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/.tmp/table/b304896b9e974e4c8a69615910c9c6f0 as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/table/b304896b9e974e4c8a69615910c9c6f0 2024-11-14T19:55:39,701 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/table/b304896b9e974e4c8a69615910c9c6f0, entries=2, sequenceid=11, filesize=5.4 K 2024-11-14T19:55:39,703 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=11, compaction requested=false 2024-11-14T19:55:39,708 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T19:55:39,709 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:55:39,709 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:55:39,709 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731614139595Running coprocessor pre-close hooks at 1731614139595Disabling compacts and flushes for region at 1731614139595Disabling writes for close at 1731614139595Obtaining lock to block concurrent updates at 1731614139595Preparing flush snapshotting stores in 1588230740 at 1731614139595Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731614139596 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731614139596Flushing 1588230740/info: creating writer at 1731614139597 (+1 ms)Flushing 1588230740/info: appending metadata at 1731614139615 (+18 ms)Flushing 1588230740/info: closing flushed file at 1731614139615Flushing 1588230740/ns: creating writer at 1731614139627 (+12 ms)Flushing 1588230740/ns: appending metadata at 1731614139641 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731614139641Flushing 1588230740/table: creating writer at 1731614139652 (+11 ms)Flushing 1588230740/table: appending metadata at 1731614139665 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731614139665Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ab26300: reopening flushed file at 1731614139678 (+13 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36115255: reopening flushed file at 1731614139686 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3571816c: reopening flushed file at 1731614139693 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=11, compaction requested=false at 1731614139703 (+10 ms)Writing region close event to WAL at 1731614139704 (+1 ms)Running coprocessor post-close hooks at 1731614139709 (+5 ms)Closed at 1731614139709 2024-11-14T19:55:39,709 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T19:55:39,795 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(976): stopping server 867b237d0fa7,39535,1731614087990; all regions closed. 2024-11-14T19:55:39,796 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,796 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,796 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,797 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,797 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741834_1010 (size=3306) 2024-11-14T19:55:39,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741834_1010 (size=3306) 2024-11-14T19:55:39,805 DEBUG [RS:0;867b237d0fa7:39535 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/oldWALs 2024-11-14T19:55:39,805 INFO [RS:0;867b237d0fa7:39535 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C39535%2C1731614087990.meta:.meta(num 1731614088943) 2024-11-14T19:55:39,806 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,806 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,806 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,806 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,807 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:39,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741844_1020 (size=1252) 2024-11-14T19:55:39,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741844_1020 (size=1252) 2024-11-14T19:55:39,984 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/WALs/867b237d0fa7,39535,1731614087990/867b237d0fa7%2C39535%2C1731614087990.1731614129542 to hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/oldWALs/867b237d0fa7%2C39535%2C1731614087990.1731614129542 2024-11-14T19:55:39,991 DEBUG [RS:0;867b237d0fa7:39535 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/oldWALs 2024-11-14T19:55:39,991 INFO [RS:0;867b237d0fa7:39535 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C39535%2C1731614087990:(num 1731614139570) 2024-11-14T19:55:39,991 DEBUG [RS:0;867b237d0fa7:39535 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:55:39,991 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:55:39,991 INFO [RS:0;867b237d0fa7:39535 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:55:39,991 INFO [RS:0;867b237d0fa7:39535 {}] hbase.ChoreService(370): Chore service for: regionserver/867b237d0fa7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T19:55:39,991 INFO [RS:0;867b237d0fa7:39535 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:55:39,991 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T19:55:39,992 INFO [RS:0;867b237d0fa7:39535 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39535 2024-11-14T19:55:40,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:55:40,001 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/867b237d0fa7,39535,1731614087990 2024-11-14T19:55:40,001 INFO [RS:0;867b237d0fa7:39535 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:55:40,009 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [867b237d0fa7,39535,1731614087990] 2024-11-14T19:55:40,017 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/867b237d0fa7,39535,1731614087990 already deleted, retry=false 2024-11-14T19:55:40,018 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 867b237d0fa7,39535,1731614087990 expired; onlineServers=0 2024-11-14T19:55:40,018 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '867b237d0fa7,39521,1731614087871' ***** 2024-11-14T19:55:40,018 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T19:55:40,018 INFO [M:0;867b237d0fa7:39521 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T19:55:40,018 INFO [M:0;867b237d0fa7:39521 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:55:40,018 DEBUG [M:0;867b237d0fa7:39521 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T19:55:40,018 DEBUG [M:0;867b237d0fa7:39521 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T19:55:40,018 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T19:55:40,018 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614088301 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614088301,5,FailOnTimeoutGroup] 2024-11-14T19:55:40,018 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614088301 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614088301,5,FailOnTimeoutGroup] 2024-11-14T19:55:40,018 INFO [M:0;867b237d0fa7:39521 {}] hbase.ChoreService(370): Chore service for: master/867b237d0fa7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T19:55:40,018 INFO [M:0;867b237d0fa7:39521 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:55:40,018 DEBUG [M:0;867b237d0fa7:39521 {}] master.HMaster(1795): Stopping service threads 2024-11-14T19:55:40,018 INFO [M:0;867b237d0fa7:39521 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T19:55:40,018 INFO [M:0;867b237d0fa7:39521 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T19:55:40,019 INFO [M:0;867b237d0fa7:39521 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T19:55:40,019 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T19:55:40,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T19:55:40,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:40,026 DEBUG [M:0;867b237d0fa7:39521 {}] zookeeper.ZKUtil(347): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T19:55:40,026 WARN [M:0;867b237d0fa7:39521 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T19:55:40,026 INFO [M:0;867b237d0fa7:39521 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/.lastflushedseqids 2024-11-14T19:55:40,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741849_1025 (size=130) 2024-11-14T19:55:40,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741849_1025 (size=130) 2024-11-14T19:55:40,032 INFO [M:0;867b237d0fa7:39521 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T19:55:40,032 INFO [M:0;867b237d0fa7:39521 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T19:55:40,032 DEBUG [M:0;867b237d0fa7:39521 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:55:40,032 INFO [M:0;867b237d0fa7:39521 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:55:40,032 DEBUG [M:0;867b237d0fa7:39521 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:55:40,032 DEBUG [M:0;867b237d0fa7:39521 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:55:40,032 DEBUG [M:0;867b237d0fa7:39521 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:55:40,032 INFO [M:0;867b237d0fa7:39521 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.91 KB 2024-11-14T19:55:40,047 DEBUG [M:0;867b237d0fa7:39521 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8bdec68d359c402a8a0cc4a4ebedf69f is 82, key is hbase:meta,,1/info:regioninfo/1731614088985/Put/seqid=0 2024-11-14T19:55:40,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741850_1026 (size=5672) 2024-11-14T19:55:40,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741850_1026 (size=5672) 2024-11-14T19:55:40,052 INFO [M:0;867b237d0fa7:39521 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8bdec68d359c402a8a0cc4a4ebedf69f 2024-11-14T19:55:40,071 DEBUG [M:0;867b237d0fa7:39521 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fabb084e08b345f0a744ff9b509008e5 is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731614089523/Put/seqid=0 2024-11-14T19:55:40,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741851_1027 (size=7818) 2024-11-14T19:55:40,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741851_1027 (size=7818) 2024-11-14T19:55:40,076 INFO [M:0;867b237d0fa7:39521 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.94 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fabb084e08b345f0a744ff9b509008e5 2024-11-14T19:55:40,080 INFO [M:0;867b237d0fa7:39521 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fabb084e08b345f0a744ff9b509008e5 2024-11-14T19:55:40,096 DEBUG [M:0;867b237d0fa7:39521 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a5783d6efa00409bad071c3ee350cadb is 69, key is 867b237d0fa7,39535,1731614087990/rs:state/1731614088350/Put/seqid=0 
2024-11-14T19:55:40,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741852_1028 (size=5156) 2024-11-14T19:55:40,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741852_1028 (size=5156) 2024-11-14T19:55:40,101 INFO [M:0;867b237d0fa7:39521 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a5783d6efa00409bad071c3ee350cadb 2024-11-14T19:55:40,109 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:55:40,109 INFO [RS:0;867b237d0fa7:39535 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:55:40,109 DEBUG [pool-700-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39535-0x1013c17e0590001, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:55:40,109 INFO [RS:0;867b237d0fa7:39535 {}] regionserver.HRegionServer(1031): Exiting; stopping=867b237d0fa7,39535,1731614087990; zookeeper connection closed. 2024-11-14T19:55:40,110 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@138126e0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@138126e0 2024-11-14T19:55:40,110 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T19:55:40,121 DEBUG [M:0;867b237d0fa7:39521 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d4d1abc3379848108593e69633d1573f is 52, key is load_balancer_on/state:d/1731614089130/Put/seqid=0 2024-11-14T19:55:40,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741853_1029 (size=5056) 2024-11-14T19:55:40,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741853_1029 (size=5056) 2024-11-14T19:55:40,126 INFO [M:0;867b237d0fa7:39521 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d4d1abc3379848108593e69633d1573f 2024-11-14T19:55:40,132 DEBUG [M:0;867b237d0fa7:39521 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8bdec68d359c402a8a0cc4a4ebedf69f as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8bdec68d359c402a8a0cc4a4ebedf69f 2024-11-14T19:55:40,136 INFO [M:0;867b237d0fa7:39521 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8bdec68d359c402a8a0cc4a4ebedf69f, entries=8, sequenceid=121, filesize=5.5 K 2024-11-14T19:55:40,137 DEBUG [M:0;867b237d0fa7:39521 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fabb084e08b345f0a744ff9b509008e5 as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fabb084e08b345f0a744ff9b509008e5 2024-11-14T19:55:40,142 INFO [M:0;867b237d0fa7:39521 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fabb084e08b345f0a744ff9b509008e5 2024-11-14T19:55:40,143 INFO [M:0;867b237d0fa7:39521 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fabb084e08b345f0a744ff9b509008e5, entries=14, sequenceid=121, filesize=7.6 K 2024-11-14T19:55:40,143 DEBUG [M:0;867b237d0fa7:39521 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a5783d6efa00409bad071c3ee350cadb as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a5783d6efa00409bad071c3ee350cadb 2024-11-14T19:55:40,148 INFO [M:0;867b237d0fa7:39521 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a5783d6efa00409bad071c3ee350cadb, entries=1, sequenceid=121, filesize=5.0 K 2024-11-14T19:55:40,149 DEBUG [M:0;867b237d0fa7:39521 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d4d1abc3379848108593e69633d1573f as hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d4d1abc3379848108593e69633d1573f 2024-11-14T19:55:40,154 INFO [M:0;867b237d0fa7:39521 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38891/user/jenkins/test-data/6cbed0ac-8a93-ce92-e2d2-aedcb12e4067/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d4d1abc3379848108593e69633d1573f, entries=1, sequenceid=121, filesize=4.9 K 2024-11-14T19:55:40,155 INFO [M:0;867b237d0fa7:39521 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=121, compaction requested=false 2024-11-14T19:55:40,156 INFO [M:0;867b237d0fa7:39521 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
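[Editor's note] The "Committing .tmp/... as .../info/..." and "Added ..." lines above follow the usual flush pattern of writing a new store file under a temporary directory and then moving it into the live store directory. The snippet below is a minimal, generic sketch of that write-then-rename idea using the plain Hadoop FileSystem API, not HBase's actual HRegionFileSystem code; the paths and payload are made up for the example.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of the write-to-.tmp-then-rename pattern visible in the
// flush log above. Paths and file contents are invented for the example.
public class TmpThenCommit {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmpFile = new Path("/demo/store/.tmp/flushed-file");
    Path finalFile = new Path("/demo/store/info/flushed-file");

    // 1. Write the new file under a temporary location.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.writeBytes("flushed cells would go here\n");
    }

    // 2. Move it into the live store directory in one rename,
    //    so readers never observe a half-written file.
    fs.mkdirs(finalFile.getParent());
    if (!fs.rename(tmpFile, finalFile)) {
      throw new java.io.IOException("commit of " + tmpFile + " failed");
    }
  }
}
```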
2024-11-14T19:55:40,156 DEBUG [M:0;867b237d0fa7:39521 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731614140032Disabling compacts and flushes for region at 1731614140032Disabling writes for close at 1731614140032Obtaining lock to block concurrent updates at 1731614140032Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731614140032Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44590, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1731614140033 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731614140033Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731614140033Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731614140046 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731614140046Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731614140056 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731614140070 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731614140070Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731614140081 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731614140095 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731614140095Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731614140106 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731614140120 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731614140121 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f6fcfa1: reopening flushed file at 1731614140131 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3553730e: reopening flushed file at 1731614140137 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6172e0d2: reopening flushed file at 1731614140143 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48bbe89a: reopening flushed file at 1731614140148 (+5 ms)Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=121, compaction requested=false at 1731614140155 (+7 ms)Writing region close event to WAL at 1731614140156 (+1 ms)Closed at 1731614140156 2024-11-14T19:55:40,156 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:40,157 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:40,157 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:40,157 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:40,157 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:55:40,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40795 is added to blk_1073741830_1006 (size=52987) 2024-11-14T19:55:40,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45499 is added to blk_1073741830_1006 (size=52987) 2024-11-14T19:55:40,159 INFO [M:0;867b237d0fa7:39521 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-14T19:55:40,159 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T19:55:40,159 INFO [M:0;867b237d0fa7:39521 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39521 2024-11-14T19:55:40,160 INFO [M:0;867b237d0fa7:39521 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:55:40,268 INFO [M:0;867b237d0fa7:39521 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:55:40,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:55:40,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39521-0x1013c17e0590000, quorum=127.0.0.1:49808, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:55:40,273 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@36ee399a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:55:40,274 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5393e504{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:55:40,274 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:55:40,274 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62e7c7a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:55:40,274 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a5795ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/hadoop.log.dir/,STOPPED} 2024-11-14T19:55:40,278 WARN [BP-46802264-172.17.0.2-1731614086336 heartbeating to localhost/127.0.0.1:38891 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:55:40,278 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T19:55:40,278 WARN [BP-46802264-172.17.0.2-1731614086336 heartbeating to localhost/127.0.0.1:38891 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-46802264-172.17.0.2-1731614086336 (Datanode Uuid daa7497d-18ff-42e6-b9fc-eea280bbe94b) service to localhost/127.0.0.1:38891 2024-11-14T19:55:40,278 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:55:40,279 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/cluster_cf91a5d0-4b02-8cc4-12f9-60484908a28e/data/data3/current/BP-46802264-172.17.0.2-1731614086336 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:55:40,280 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/cluster_cf91a5d0-4b02-8cc4-12f9-60484908a28e/data/data4/current/BP-46802264-172.17.0.2-1731614086336 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:55:40,280 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:55:40,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75949488{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:55:40,283 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@53a45e9d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:55:40,283 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:55:40,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65e861e1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:55:40,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7cce55bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/hadoop.log.dir/,STOPPED} 2024-11-14T19:55:40,284 WARN [BP-46802264-172.17.0.2-1731614086336 heartbeating to localhost/127.0.0.1:38891 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:55:40,284 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T19:55:40,284 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:55:40,284 WARN [BP-46802264-172.17.0.2-1731614086336 heartbeating to localhost/127.0.0.1:38891 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-46802264-172.17.0.2-1731614086336 (Datanode Uuid 1c7fda34-774a-478a-a2fe-d225c9bd26de) service to localhost/127.0.0.1:38891 2024-11-14T19:55:40,285 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/cluster_cf91a5d0-4b02-8cc4-12f9-60484908a28e/data/data1/current/BP-46802264-172.17.0.2-1731614086336 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:55:40,285 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/cluster_cf91a5d0-4b02-8cc4-12f9-60484908a28e/data/data2/current/BP-46802264-172.17.0.2-1731614086336 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:55:40,285 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:55:40,291 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5da50ccf{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:55:40,292 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13aafd08{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:55:40,292 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:55:40,292 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3def846f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:55:40,292 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53fdada6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/hadoop.log.dir/,STOPPED} 2024-11-14T19:55:40,299 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T19:55:40,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T19:55:40,324 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:38891 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38891 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38891 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:38891 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38891 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:38891 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38891 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38891 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:38891 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=177 (was 255), ProcessCount=11 (was 11), AvailableMemoryMB=5702 (was 6031) 2024-11-14T19:55:40,332 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=177, ProcessCount=11, AvailableMemoryMB=5702 2024-11-14T19:55:40,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T19:55:40,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/hadoop.log.dir so I do NOT create it in target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8 2024-11-14T19:55:40,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1e4d9370-d700-7d9f-8226-b5c363b694f9/hadoop.tmp.dir so I do NOT create it in target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8 2024-11-14T19:55:40,332 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/cluster_9a93c74c-8c1a-c094-9f67-36106bcfcf14, deleteOnExit=true 2024-11-14T19:55:40,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T19:55:40,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/test.cache.data in system properties and HBase conf 2024-11-14T19:55:40,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T19:55:40,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/hadoop.log.dir in system properties and HBase conf 2024-11-14T19:55:40,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T19:55:40,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/mapreduce.cluster.temp.dir in system properties and HBase 
conf 2024-11-14T19:55:40,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T19:55:40,333 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-14T19:55:40,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T19:55:40,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T19:55:40,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T19:55:40,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T19:55:40,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T19:55:40,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T19:55:40,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T19:55:40,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:55:40,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T19:55:40,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/nfs.dump.dir in 
system properties and HBase conf 2024-11-14T19:55:40,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/java.io.tmpdir in system properties and HBase conf 2024-11-14T19:55:40,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:55:40,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T19:55:40,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T19:55:40,347 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:55:40,382 INFO [regionserver/867b237d0fa7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:55:40,567 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:55:40,573 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:55:40,575 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:55:40,575 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:55:40,575 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T19:55:40,576 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:55:40,576 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9c2a749{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:55:40,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63529b4a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:55:40,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:40,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:40,667 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3978da62{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/java.io.tmpdir/jetty-localhost-38917-hadoop-hdfs-3_4_1-tests_jar-_-any-17571936803610746922/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:55:40,667 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a583b2e{HTTP/1.1, (http/1.1)}{localhost:38917} 2024-11-14T19:55:40,667 INFO [Time-limited test {}] server.Server(415): Started @252713ms 2024-11-14T19:55:40,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:55:40,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T19:55:40,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T19:55:40,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T19:55:40,679 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode 
= 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:55:40,866 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:55:40,868 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:55:40,869 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:55:40,869 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:55:40,869 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T19:55:40,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59cf886d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:55:40,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@197b82ca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:55:40,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74aeebd9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/java.io.tmpdir/jetty-localhost-38583-hadoop-hdfs-3_4_1-tests_jar-_-any-15320667006176115226/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:55:40,962 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17d44920{HTTP/1.1, (http/1.1)}{localhost:38583} 2024-11-14T19:55:40,962 INFO [Time-limited test {}] server.Server(415): Started @253007ms 2024-11-14T19:55:40,963 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:55:40,988 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:55:40,990 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:55:40,991 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:55:40,991 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:55:40,991 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T19:55:40,991 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36a8171e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:55:40,992 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b969725{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:55:41,082 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@247322e7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/java.io.tmpdir/jetty-localhost-36967-hadoop-hdfs-3_4_1-tests_jar-_-any-8422571507791018631/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:55:41,082 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@173a06b0{HTTP/1.1, (http/1.1)}{localhost:36967} 2024-11-14T19:55:41,082 INFO [Time-limited test {}] server.Server(415): Started @253128ms 2024-11-14T19:55:41,084 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:55:41,539 WARN [Thread-1968 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/cluster_9a93c74c-8c1a-c094-9f67-36106bcfcf14/data/data1/current/BP-456699626-172.17.0.2-1731614140350/current, will proceed with Du for space computation calculation, 2024-11-14T19:55:41,539 WARN [Thread-1969 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/cluster_9a93c74c-8c1a-c094-9f67-36106bcfcf14/data/data2/current/BP-456699626-172.17.0.2-1731614140350/current, will proceed with Du for space computation calculation, 2024-11-14T19:55:41,554 WARN [Thread-1932 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T19:55:41,555 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x125f4abbe4f30581 with lease ID 0x52ed5badd318651c: Processing first storage report for DS-752d38cc-ae95-41d4-bfa4-1660a4face0f from datanode DatanodeRegistration(127.0.0.1:44395, datanodeUuid=9da3c350-5010-4b5d-b5f8-03bde92f3da2, infoPort=36971, infoSecurePort=0, ipcPort=43065, storageInfo=lv=-57;cid=testClusterID;nsid=1640285387;c=1731614140350) 2024-11-14T19:55:41,556 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x125f4abbe4f30581 with lease ID 0x52ed5badd318651c: from storage DS-752d38cc-ae95-41d4-bfa4-1660a4face0f node DatanodeRegistration(127.0.0.1:44395, datanodeUuid=9da3c350-5010-4b5d-b5f8-03bde92f3da2, infoPort=36971, infoSecurePort=0, ipcPort=43065, storageInfo=lv=-57;cid=testClusterID;nsid=1640285387;c=1731614140350), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:55:41,556 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x125f4abbe4f30581 with lease ID 0x52ed5badd318651c: Processing first storage report for DS-6ccf4612-654c-43b1-8075-b4036f08b815 from datanode DatanodeRegistration(127.0.0.1:44395, datanodeUuid=9da3c350-5010-4b5d-b5f8-03bde92f3da2, infoPort=36971, infoSecurePort=0, ipcPort=43065, storageInfo=lv=-57;cid=testClusterID;nsid=1640285387;c=1731614140350) 2024-11-14T19:55:41,556 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x125f4abbe4f30581 with lease ID 0x52ed5badd318651c: from storage DS-6ccf4612-654c-43b1-8075-b4036f08b815 node DatanodeRegistration(127.0.0.1:44395, datanodeUuid=9da3c350-5010-4b5d-b5f8-03bde92f3da2, infoPort=36971, infoSecurePort=0, ipcPort=43065, storageInfo=lv=-57;cid=testClusterID;nsid=1640285387;c=1731614140350), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:55:41,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-14T19:55:41,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-14T19:55:41,657 WARN [Thread-1979 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/cluster_9a93c74c-8c1a-c094-9f67-36106bcfcf14/data/data3/current/BP-456699626-172.17.0.2-1731614140350/current, will proceed with Du for space computation calculation, 2024-11-14T19:55:41,657 WARN [Thread-1980 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/cluster_9a93c74c-8c1a-c094-9f67-36106bcfcf14/data/data4/current/BP-456699626-172.17.0.2-1731614140350/current, will proceed with Du for space computation calculation, 2024-11-14T19:55:41,671 WARN [Thread-1955 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T19:55:41,673 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59b6f021bfe076f6 with lease ID 0x52ed5badd318651d: Processing first storage report for DS-cb08cfed-c85e-4ef6-a7de-4e71384671c0 from datanode DatanodeRegistration(127.0.0.1:37851, datanodeUuid=eff9e298-4f38-4ea0-a706-4b632e33d878, infoPort=35647, infoSecurePort=0, ipcPort=41027, storageInfo=lv=-57;cid=testClusterID;nsid=1640285387;c=1731614140350) 2024-11-14T19:55:41,673 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59b6f021bfe076f6 with lease ID 0x52ed5badd318651d: from storage DS-cb08cfed-c85e-4ef6-a7de-4e71384671c0 node DatanodeRegistration(127.0.0.1:37851, datanodeUuid=eff9e298-4f38-4ea0-a706-4b632e33d878, infoPort=35647, infoSecurePort=0, ipcPort=41027, storageInfo=lv=-57;cid=testClusterID;nsid=1640285387;c=1731614140350), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:55:41,673 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59b6f021bfe076f6 with lease ID 0x52ed5badd318651d: Processing first storage report for DS-f3ba8a95-5171-4e54-8a8e-bc9a0441b47c from datanode DatanodeRegistration(127.0.0.1:37851, datanodeUuid=eff9e298-4f38-4ea0-a706-4b632e33d878, infoPort=35647, infoSecurePort=0, ipcPort=41027, storageInfo=lv=-57;cid=testClusterID;nsid=1640285387;c=1731614140350) 2024-11-14T19:55:41,673 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59b6f021bfe076f6 with lease ID 0x52ed5badd318651d: from storage DS-f3ba8a95-5171-4e54-8a8e-bc9a0441b47c node DatanodeRegistration(127.0.0.1:37851, datanodeUuid=eff9e298-4f38-4ea0-a706-4b632e33d878, infoPort=35647, infoSecurePort=0, ipcPort=41027, storageInfo=lv=-57;cid=testClusterID;nsid=1640285387;c=1731614140350), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T19:55:41,709 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8 2024-11-14T19:55:41,715 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/cluster_9a93c74c-8c1a-c094-9f67-36106bcfcf14/zookeeper_0, clientPort=58796, secureClientPort=-1,
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/cluster_9a93c74c-8c1a-c094-9f67-36106bcfcf14/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/cluster_9a93c74c-8c1a-c094-9f67-36106bcfcf14/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T19:55:41,716 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58796 2024-11-14T19:55:41,717 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:55:41,719 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:55:41,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:55:41,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:55:41,731 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66 with version=8 2024-11-14T19:55:41,731 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/hbase-staging 2024-11-14T19:55:41,733 INFO [Time-limited test {}] client.ConnectionUtils(128): master/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:55:41,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:55:41,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:55:41,733 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:55:41,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:55:41,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:55:41,733 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T19:55:41,734 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:55:41,734 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44359 2024-11-14T19:55:41,736 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44359 connecting to ZooKeeper ensemble=127.0.0.1:58796 2024-11-14T19:55:41,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:443590x0, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:55:41,781 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44359-0x1013c18b2b80000 connected 2024-11-14T19:55:41,846 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:55:41,847 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:55:41,849 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:55:41,849 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66, hbase.cluster.distributed=false 2024-11-14T19:55:41,851 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:55:41,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44359 2024-11-14T19:55:41,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44359 2024-11-14T19:55:41,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44359 2024-11-14T19:55:41,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44359 2024-11-14T19:55:41,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44359 2024-11-14T19:55:41,870 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:55:41,870 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:55:41,870 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:55:41,870 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:55:41,870 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:55:41,870 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:55:41,870 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T19:55:41,871 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:55:41,871 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36939 2024-11-14T19:55:41,873 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36939 connecting to ZooKeeper ensemble=127.0.0.1:58796 2024-11-14T19:55:41,874 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:55:41,875 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:55:41,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:369390x0, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:55:41,888 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:369390x0, quorum=127.0.0.1:58796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:55:41,888 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36939-0x1013c18b2b80001 connected 2024-11-14T19:55:41,888 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T19:55:41,889 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T19:55:41,889 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T19:55:41,890 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:55:41,891 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36939 2024-11-14T19:55:41,894 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36939 2024-11-14T19:55:41,894 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36939 2024-11-14T19:55:41,894 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36939 2024-11-14T19:55:41,894 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36939 2024-11-14T19:55:41,904 
DEBUG [M:0;867b237d0fa7:44359 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;867b237d0fa7:44359 2024-11-14T19:55:41,905 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/867b237d0fa7,44359,1731614141733 2024-11-14T19:55:41,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:55:41,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:55:41,913 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/867b237d0fa7,44359,1731614141733 2024-11-14T19:55:41,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:41,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T19:55:41,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:41,921 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T19:55:41,922 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/867b237d0fa7,44359,1731614141733 from backup master directory 2024-11-14T19:55:41,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:55:41,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/867b237d0fa7,44359,1731614141733 2024-11-14T19:55:41,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:55:41,929 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T19:55:41,929 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=867b237d0fa7,44359,1731614141733 2024-11-14T19:55:41,933 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/hbase.id] with ID: ba87bd9f-769a-42bc-a51d-e3369d597971 2024-11-14T19:55:41,933 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/.tmp/hbase.id 2024-11-14T19:55:41,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:55:41,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:55:41,941 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/.tmp/hbase.id]:[hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/hbase.id] 2024-11-14T19:55:41,953 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:55:41,953 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T19:55:41,955 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-14T19:55:41,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:41,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:41,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:55:41,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:55:41,970 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T19:55:41,972 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T19:55:41,972 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:55:41,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:55:41,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:55:41,981 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store 2024-11-14T19:55:41,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:55:41,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:55:41,993 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:55:41,993 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:55:41,993 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:55:41,993 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:55:41,993 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:55:41,993 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:55:41,993 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T19:55:41,993 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731614141993Disabling compacts and flushes for region at 1731614141993Disabling writes for close at 1731614141993Writing region close event to WAL at 1731614141993Closed at 1731614141993 2024-11-14T19:55:41,994 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/.initializing 2024-11-14T19:55:41,994 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/WALs/867b237d0fa7,44359,1731614141733 2024-11-14T19:55:41,997 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C44359%2C1731614141733, suffix=, logDir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/WALs/867b237d0fa7,44359,1731614141733, archiveDir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/oldWALs, maxLogs=10 2024-11-14T19:55:41,997 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C44359%2C1731614141733.1731614141997 2024-11-14T19:55:42,006 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/WALs/867b237d0fa7,44359,1731614141733/867b237d0fa7%2C44359%2C1731614141733.1731614141997 2024-11-14T19:55:42,011 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35647:35647),(127.0.0.1/127.0.0.1:36971:36971)] 2024-11-14T19:55:42,019 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:55:42,019 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:55:42,019 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:55:42,019 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:55:42,021 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:55:42,023 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T19:55:42,023 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:55:42,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:55:42,025 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T19:55:42,025 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:55:42,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:55:42,027 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T19:55:42,027 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,028 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:55:42,028 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:55:42,029 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T19:55:42,029 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,030 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:55:42,030 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:55:42,031 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:55:42,032 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:55:42,033 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:55:42,033 DEBUG [master/867b237d0fa7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:55:42,034 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T19:55:42,035 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:55:42,038 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:55:42,038 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=709924, jitterRate=-0.09728619456291199}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T19:55:42,039 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731614142019Initializing all the Stores at 1731614142020 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614142021 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614142021Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614142021Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614142021Cleaning up temporary data from old regions at 1731614142033 (+12 ms)Region opened successfully at 1731614142039 (+6 ms) 2024-11-14T19:55:42,039 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T19:55:42,042 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4513d019, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:55:42,043 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T19:55:42,043 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T19:55:42,043 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T19:55:42,043 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T19:55:42,044 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T19:55:42,044 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T19:55:42,044 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T19:55:42,046 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T19:55:42,047 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T19:55:42,054 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T19:55:42,054 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T19:55:42,055 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T19:55:42,062 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T19:55:42,063 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T19:55:42,064 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T19:55:42,071 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T19:55:42,072 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T19:55:42,079 DEBUG 
[master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T19:55:42,081 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T19:55:42,087 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T19:55:42,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:55:42,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:55:42,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:42,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:42,096 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=867b237d0fa7,44359,1731614141733, sessionid=0x1013c18b2b80000, setting cluster-up flag (Was=false) 2024-11-14T19:55:42,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:42,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:42,137 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T19:55:42,139 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,44359,1731614141733 2024-11-14T19:55:42,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:42,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:42,179 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T19:55:42,181 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,44359,1731614141733 2024-11-14T19:55:42,182 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T19:55:42,184 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T19:55:42,185 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T19:55:42,185 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T19:55:42,185 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 867b237d0fa7,44359,1731614141733 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T19:55:42,187 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:55:42,187 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:55:42,187 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:55:42,187 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:55:42,187 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/867b237d0fa7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T19:55:42,187 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:55:42,187 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:55:42,187 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T19:55:42,189 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731614172188 2024-11-14T19:55:42,189 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T19:55:42,189 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:55:42,189 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T19:55:42,189 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T19:55:42,189 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T19:55:42,189 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T19:55:42,189 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T19:55:42,189 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T19:55:42,189 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,190 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T19:55:42,190 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T19:55:42,190 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T19:55:42,190 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T19:55:42,190 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T19:55:42,190 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,190 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614142190,5,FailOnTimeoutGroup] 2024-11-14T19:55:42,190 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614142190,5,FailOnTimeoutGroup] 2024-11-14T19:55:42,190 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
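The preceding records show the active master starting its cleaner chores: a log_cleaner pool of size 1, an hfile_cleaner pool of size 2, and the LogsCleaner/HFileCleaner ScheduledChores, both on a 600000 ms period. A minimal sketch of reading that interval from an HBase configuration, assuming the conventional hbase.master.cleaner.interval key (the key name and default are assumptions, not values taken from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerIntervalSketch {
    public static void main(String[] args) {
        // Loads hbase-default.xml / hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();
        // 600000 ms (10 minutes) matches the LogsCleaner/HFileCleaner period logged above.
        long cleanerIntervalMs = conf.getLong("hbase.master.cleaner.interval", 600_000L);
        System.out.println("cleaner chore period (ms): " + cleanerIntervalMs);
    }
}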
2024-11-14T19:55:42,190 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T19:55:42,190 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,190 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T19:55:42,190 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
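The FSTableDescriptors record above prints the full hbase:meta descriptor: four column families (info, ns, rep_barrier, table), each with ROW_INDEX_V1 data-block encoding, ROWCOL bloom filters, IN_MEMORY => 'true', and an 8 KB block size (64 KB for rep_barrier). The same family attributes can be expressed with the public client-side builders; the sketch below builds an equivalent family for a hypothetical user table and is not the internal code path InitMetaProcedure uses:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
    public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log:
        // VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom filter, IN_MEMORY, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build();

        // A user table built the same way; hbase:meta itself is created internally
        // by InitMetaProcedure rather than through this client-side builder.
        TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example_table"))
                .setColumnFamily(info)
                .build();
        System.out.println(td);
    }
}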
2024-11-14T19:55:42,196 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(746): ClusterId : ba87bd9f-769a-42bc-a51d-e3369d597971 2024-11-14T19:55:42,196 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T19:55:42,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:55:42,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:55:42,205 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T19:55:42,205 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T19:55:42,206 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T19:55:42,206 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66 2024-11-14T19:55:42,213 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T19:55:42,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:55:42,214 DEBUG [RS:0;867b237d0fa7:36939 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@968cf2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:55:42,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:55:42,214 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:55:42,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:55:42,221 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:55:42,221 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:55:42,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:55:42,223 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:55:42,223 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:55:42,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:55:42,225 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:55:42,225 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:55:42,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:55:42,226 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:55:42,226 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:55:42,227 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:55:42,227 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740 2024-11-14T19:55:42,228 
DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740 2024-11-14T19:55:42,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:55:42,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:55:42,229 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T19:55:42,230 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:55:42,230 DEBUG [RS:0;867b237d0fa7:36939 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;867b237d0fa7:36939 2024-11-14T19:55:42,230 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T19:55:42,230 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T19:55:42,230 DEBUG [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T19:55:42,231 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(2659): reportForDuty to master=867b237d0fa7,44359,1731614141733 with port=36939, startcode=1731614141870 2024-11-14T19:55:42,231 DEBUG [RS:0;867b237d0fa7:36939 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T19:55:42,232 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:55:42,232 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876039, jitterRate=0.11394152045249939}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:55:42,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731614142214Initializing all the Stores at 1731614142215 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614142215Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614142220 (+5 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614142220Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614142220Cleaning up temporary data from old regions at 1731614142229 (+9 ms)Region opened successfully at 1731614142233 (+4 ms) 2024-11-14T19:55:42,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:55:42,233 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:55:42,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:55:42,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T19:55:42,233 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57751, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T19:55:42,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T19:55:42,234 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:55:42,234 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44359 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 867b237d0fa7,36939,1731614141870 2024-11-14T19:55:42,234 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731614142233Disabling compacts and flushes for region at 1731614142233Disabling writes for close at 1731614142233Writing region close event to WAL at 1731614142234 (+1 ms)Closed at 1731614142234 2024-11-14T19:55:42,234 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44359 {}] master.ServerManager(517): Registering regionserver=867b237d0fa7,36939,1731614141870 2024-11-14T19:55:42,235 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:55:42,235 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T19:55:42,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T19:55:42,236 DEBUG [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66 2024-11-14T19:55:42,236 DEBUG [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39689 2024-11-14T19:55:42,236 DEBUG [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T19:55:42,237 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure 
table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:55:42,238 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T19:55:42,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:55:42,246 DEBUG [RS:0;867b237d0fa7:36939 {}] zookeeper.ZKUtil(111): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/867b237d0fa7,36939,1731614141870 2024-11-14T19:55:42,246 WARN [RS:0;867b237d0fa7:36939 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T19:55:42,246 INFO [RS:0;867b237d0fa7:36939 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:55:42,246 DEBUG [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870 2024-11-14T19:55:42,247 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [867b237d0fa7,36939,1731614141870] 2024-11-14T19:55:42,249 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T19:55:42,251 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T19:55:42,251 INFO [RS:0;867b237d0fa7:36939 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T19:55:42,251 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,254 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T19:55:42,255 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T19:55:42,255 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
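On the region-server side, the records above report a global memstore limit of 880 M with a low-water mark of 836 M (a 0.95 ratio) and compaction throughput bounds of 100 MB/s (upper) and 50 MB/s (lower) from PressureAwareCompactionThroughputController. A rough sketch of the configuration keys such figures are usually derived from; the key names and defaults here are assumptions for illustration, not values read from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // The global memstore limit is a fraction of the region server heap; the
        // 880 M / 836 M values in the log are the absolute figures computed from
        // that fraction for this particular test JVM.
        float globalMemstoreFraction =
                conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);

        // Compaction throughput bounds reported by the throughput controller
        // (100 MB/s upper, 50 MB/s lower in the log above).
        long upperBound = conf.getLong(
                "hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        long lowerBound = conf.getLong(
                "hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);

        System.out.printf("memstore fraction=%.2f, compaction throughput=%d..%d bytes/s%n",
                globalMemstoreFraction, lowerBound, upperBound);
    }
}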
2024-11-14T19:55:42,255 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:55:42,255 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:55:42,256 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:55:42,256 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:55:42,256 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:55:42,256 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:55:42,256 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:55:42,256 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:55:42,256 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:55:42,256 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:55:42,256 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:55:42,256 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:55:42,256 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:55:42,256 DEBUG [RS:0;867b237d0fa7:36939 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:55:42,256 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,256 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,256 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,256 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-14T19:55:42,256 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,256 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,36939,1731614141870-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:55:42,270 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T19:55:42,270 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,36939,1731614141870-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,271 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,271 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.Replication(171): 867b237d0fa7,36939,1731614141870 started 2024-11-14T19:55:42,284 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,284 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(1482): Serving as 867b237d0fa7,36939,1731614141870, RpcServer on 867b237d0fa7/172.17.0.2:36939, sessionid=0x1013c18b2b80001 2024-11-14T19:55:42,284 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T19:55:42,285 DEBUG [RS:0;867b237d0fa7:36939 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 867b237d0fa7,36939,1731614141870 2024-11-14T19:55:42,285 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,36939,1731614141870' 2024-11-14T19:55:42,285 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T19:55:42,285 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T19:55:42,286 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T19:55:42,286 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T19:55:42,286 DEBUG [RS:0;867b237d0fa7:36939 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 867b237d0fa7,36939,1731614141870 2024-11-14T19:55:42,286 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,36939,1731614141870' 2024-11-14T19:55:42,286 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T19:55:42,286 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T19:55:42,286 DEBUG [RS:0;867b237d0fa7:36939 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T19:55:42,286 INFO [RS:0;867b237d0fa7:36939 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T19:55:42,286 INFO [RS:0;867b237d0fa7:36939 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-14T19:55:42,388 WARN [867b237d0fa7:44359 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T19:55:42,391 INFO [RS:0;867b237d0fa7:36939 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C36939%2C1731614141870, suffix=, logDir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870, archiveDir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/oldWALs, maxLogs=32 2024-11-14T19:55:42,392 INFO [RS:0;867b237d0fa7:36939 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C36939%2C1731614141870.1731614142391 2024-11-14T19:55:42,400 INFO [RS:0;867b237d0fa7:36939 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870/867b237d0fa7%2C36939%2C1731614141870.1731614142391 2024-11-14T19:55:42,401 DEBUG [RS:0;867b237d0fa7:36939 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36971:36971),(127.0.0.1/127.0.0.1:35647:35647)] 2024-11-14T19:55:42,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:42,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:42,638 DEBUG [867b237d0fa7:44359 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T19:55:42,639 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=867b237d0fa7,36939,1731614141870 2024-11-14T19:55:42,640 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,36939,1731614141870, state=OPENING 2024-11-14T19:55:42,651 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T19:55:42,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:42,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:55:42,660 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:55:42,660 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:55:42,660 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:55:42,660 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=867b237d0fa7,36939,1731614141870}] 2024-11-14T19:55:42,813 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T19:55:42,815 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35405, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T19:55:42,819 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T19:55:42,819 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:55:42,821 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C36939%2C1731614141870.meta, suffix=.meta, logDir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870, archiveDir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/oldWALs, maxLogs=32 2024-11-14T19:55:42,822 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C36939%2C1731614141870.meta.1731614142822.meta 2024-11-14T19:55:42,828 INFO 
[RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870/867b237d0fa7%2C36939%2C1731614141870.meta.1731614142822.meta 2024-11-14T19:55:42,830 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36971:36971),(127.0.0.1/127.0.0.1:35647:35647)] 2024-11-14T19:55:42,833 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:55:42,834 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T19:55:42,834 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T19:55:42,834 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T19:55:42,834 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T19:55:42,834 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:55:42,834 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T19:55:42,834 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T19:55:42,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:55:42,836 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:55:42,836 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:55:42,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:55:42,838 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:55:42,838 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:55:42,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:55:42,839 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:55:42,839 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:55:42,840 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:55:42,840 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:55:42,840 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:42,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:55:42,841 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:55:42,842 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740 2024-11-14T19:55:42,843 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740 2024-11-14T19:55:42,844 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:55:42,844 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:55:42,845 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
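The FlushLargeStoresPolicy record above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set for hbase:meta, so the per-family flush lower bound falls back to the region's memstore flush heap size divided by the number of families. With the four meta families (info, ns, rep_barrier, table) and the 16.0 M figure in the log, the implied flush heap size is 64 MB; a tiny sketch of that arithmetic, with the 64 MB value inferred from the log rather than read from configuration:

public class PerFamilyFlushBoundSketch {
    public static void main(String[] args) {
        // From the log: the per-family lower bound is memStoreFlushHeapSize / numberOfFamilies.
        long memStoreFlushHeapSize = 64L * 1024 * 1024; // implied by the 16.0 M figure; assumed here
        int families = 4;                               // hbase:meta: info, ns, rep_barrier, table
        long perFamilyLowerBound = memStoreFlushHeapSize / families;
        System.out.println(perFamilyLowerBound);        // 16777216 bytes = 16 MB
    }
}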
2024-11-14T19:55:42,846 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:55:42,847 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810080, jitterRate=0.030070677399635315}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:55:42,848 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T19:55:42,848 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731614142834Writing region info on filesystem at 1731614142834Initializing all the Stores at 1731614142835 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614142835Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614142835Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614142835Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614142835Cleaning up temporary data from old regions at 1731614142844 (+9 ms)Running coprocessor post-open hooks at 1731614142848 (+4 ms)Region opened successfully at 1731614142848 2024-11-14T19:55:42,850 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731614142812 2024-11-14T19:55:42,854 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T19:55:42,855 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T19:55:42,855 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=867b237d0fa7,36939,1731614141870 2024-11-14T19:55:42,856 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,36939,1731614141870, state=OPEN 2024-11-14T19:55:42,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:55:42,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:55:42,907 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:55:42,907 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=867b237d0fa7,36939,1731614141870 2024-11-14T19:55:42,908 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:55:42,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T19:55:42,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=867b237d0fa7,36939,1731614141870 in 247 msec 2024-11-14T19:55:42,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T19:55:42,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 676 msec 2024-11-14T19:55:42,915 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:55:42,915 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T19:55:42,917 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:55:42,917 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,36939,1731614141870, seqNum=-1] 2024-11-14T19:55:42,917 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:55:42,919 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35161, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:55:42,926 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 741 msec 2024-11-14T19:55:42,926 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731614142926, completionTime=-1 2024-11-14T19:55:42,926 INFO 
[master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T19:55:42,926 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T19:55:42,929 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T19:55:42,929 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731614202929 2024-11-14T19:55:42,929 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731614262929 2024-11-14T19:55:42,929 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-14T19:55:42,930 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,44359,1731614141733-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,930 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,44359,1731614141733-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,930 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,44359,1731614141733-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,930 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-867b237d0fa7:44359, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,930 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,931 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,936 DEBUG [master/867b237d0fa7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T19:55:42,939 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.010sec 2024-11-14T19:55:42,940 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T19:55:42,940 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T19:55:42,940 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T19:55:42,940 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-14T19:55:42,940 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T19:55:42,940 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,44359,1731614141733-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:55:42,940 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,44359,1731614141733-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T19:55:42,943 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T19:55:42,943 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T19:55:42,943 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,44359,1731614141733-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:55:42,997 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21d73b2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:55:42,997 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 867b237d0fa7,44359,-1 for getting cluster id 2024-11-14T19:55:42,998 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T19:55:43,001 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba87bd9f-769a-42bc-a51d-e3369d597971' 2024-11-14T19:55:43,001 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T19:55:43,001 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba87bd9f-769a-42bc-a51d-e3369d597971" 2024-11-14T19:55:43,001 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4176bc39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:55:43,001 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [867b237d0fa7,44359,-1] 2024-11-14T19:55:43,002 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T19:55:43,002 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:55:43,003 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47656, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T19:55:43,004 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d7f1ef4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:55:43,004 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:55:43,006 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,36939,1731614141870, seqNum=-1] 2024-11-14T19:55:43,006 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:55:43,007 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43800, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:55:43,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=867b237d0fa7,44359,1731614141733 2024-11-14T19:55:43,010 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:55:43,013 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T19:55:43,014 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T19:55:43,015 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 867b237d0fa7,44359,1731614141733 2024-11-14T19:55:43,015 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6b640088 2024-11-14T19:55:43,015 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T19:55:43,017 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47664, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T19:55:43,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44359 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T19:55:43,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44359 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-14T19:55:43,018 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44359 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T19:55:43,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44359 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-14T19:55:43,021 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T19:55:43,021 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:43,021 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44359 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-14T19:55:43,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44359 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T19:55:43,023 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T19:55:43,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741835_1011 (size=381) 2024-11-14T19:55:43,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741835_1011 (size=381) 2024-11-14T19:55:43,039 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 229a9f60e73dc7c25a0778695d1fe42e, NAME => 'TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66 2024-11-14T19:55:43,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741836_1012 (size=64) 2024-11-14T19:55:43,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741836_1012 (size=64) 2024-11-14T19:55:43,047 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:55:43,047 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 229a9f60e73dc7c25a0778695d1fe42e, disabling compactions & flushes 2024-11-14T19:55:43,047 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 2024-11-14T19:55:43,047 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 2024-11-14T19:55:43,047 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. after waiting 0 ms 2024-11-14T19:55:43,047 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 2024-11-14T19:55:43,047 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 2024-11-14T19:55:43,047 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 229a9f60e73dc7c25a0778695d1fe42e: Waiting for close lock at 1731614143047Disabling compacts and flushes for region at 1731614143047Disabling writes for close at 1731614143047Writing region close event to WAL at 1731614143047Closed at 1731614143047 2024-11-14T19:55:43,049 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T19:55:43,049 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731614143049"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731614143049"}]},"ts":"1731614143049"} 2024-11-14T19:55:43,051 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-14T19:55:43,053 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T19:55:43,053 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731614143053"}]},"ts":"1731614143053"} 2024-11-14T19:55:43,056 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-14T19:55:43,056 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=229a9f60e73dc7c25a0778695d1fe42e, ASSIGN}] 2024-11-14T19:55:43,057 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=229a9f60e73dc7c25a0778695d1fe42e, ASSIGN 2024-11-14T19:55:43,059 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=229a9f60e73dc7c25a0778695d1fe42e, ASSIGN; state=OFFLINE, location=867b237d0fa7,36939,1731614141870; forceNewPlan=false, retain=false 2024-11-14T19:55:43,209 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=229a9f60e73dc7c25a0778695d1fe42e, regionState=OPENING, regionLocation=867b237d0fa7,36939,1731614141870 2024-11-14T19:55:43,212 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=229a9f60e73dc7c25a0778695d1fe42e, ASSIGN because future has completed 2024-11-14T19:55:43,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 229a9f60e73dc7c25a0778695d1fe42e, server=867b237d0fa7,36939,1731614141870}] 2024-11-14T19:55:43,370 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 
2024-11-14T19:55:43,370 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 229a9f60e73dc7c25a0778695d1fe42e, NAME => 'TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:55:43,370 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:43,370 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:55:43,370 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:43,371 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:43,372 INFO [StoreOpener-229a9f60e73dc7c25a0778695d1fe42e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:43,373 INFO [StoreOpener-229a9f60e73dc7c25a0778695d1fe42e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 229a9f60e73dc7c25a0778695d1fe42e columnFamilyName info 2024-11-14T19:55:43,373 DEBUG [StoreOpener-229a9f60e73dc7c25a0778695d1fe42e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:55:43,374 INFO [StoreOpener-229a9f60e73dc7c25a0778695d1fe42e-1 {}] regionserver.HStore(327): Store=229a9f60e73dc7c25a0778695d1fe42e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:55:43,374 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:43,375 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:43,375 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:43,375 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:43,375 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:43,377 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:43,379 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:55:43,381 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 229a9f60e73dc7c25a0778695d1fe42e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877171, jitterRate=0.11538106203079224}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T19:55:43,381 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:43,382 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 229a9f60e73dc7c25a0778695d1fe42e: Running coprocessor pre-open hook at 1731614143371Writing region info on filesystem at 1731614143371Initializing all the Stores at 1731614143371Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614143372 (+1 ms)Cleaning up temporary data from old regions at 1731614143375 (+3 ms)Running coprocessor post-open hooks at 1731614143381 (+6 ms)Region opened successfully at 1731614143382 (+1 ms) 2024-11-14T19:55:43,383 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., pid=6, masterSystemTime=1731614143366 2024-11-14T19:55:43,386 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 
2024-11-14T19:55:43,386 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 2024-11-14T19:55:43,387 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=229a9f60e73dc7c25a0778695d1fe42e, regionState=OPEN, openSeqNum=2, regionLocation=867b237d0fa7,36939,1731614141870 2024-11-14T19:55:43,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 229a9f60e73dc7c25a0778695d1fe42e, server=867b237d0fa7,36939,1731614141870 because future has completed 2024-11-14T19:55:43,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T19:55:43,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 229a9f60e73dc7c25a0778695d1fe42e, server=867b237d0fa7,36939,1731614141870 in 177 msec 2024-11-14T19:55:43,396 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T19:55:43,396 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=229a9f60e73dc7c25a0778695d1fe42e, ASSIGN in 337 msec 2024-11-14T19:55:43,397 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T19:55:43,397 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731614143397"}]},"ts":"1731614143397"} 2024-11-14T19:55:43,399 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-14T19:55:43,401 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T19:55:43,403 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 383 msec 2024-11-14T19:55:43,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:43,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:44,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:44,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:44,635 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,637 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,670 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,670 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:44,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,179 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T19:55:45,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,182 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,182 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:55:45,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:45,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:46,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:46,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:47,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:47,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:48,250 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T19:55:48,250 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-14T19:55:48,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:48,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:49,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:49,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:50,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:50,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:50,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T19:55:50,669 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T19:55:50,669 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:55:50,669 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T19:55:50,670 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T19:55:50,670 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T19:55:50,670 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-14T19:55:50,670 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T19:55:51,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:51,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:52,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:52,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:53,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44359 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T19:55:53,077 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-14T19:55:53,077 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-14T19:55:53,080 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-14T19:55:53,080 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 2024-11-14T19:55:53,083 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., hostname=867b237d0fa7,36939,1731614141870, seqNum=2] 2024-11-14T19:55:53,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:53,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 229a9f60e73dc7c25a0778695d1fe42e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T19:55:53,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/26a74d6f8d094bafbd59b14f93a051c0 is 1080, key is row0001/info:/1731614153084/Put/seqid=0 2024-11-14T19:55:53,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741837_1013 (size=12509) 2024-11-14T19:55:53,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741837_1013 (size=12509) 2024-11-14T19:55:53,125 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/26a74d6f8d094bafbd59b14f93a051c0 2024-11-14T19:55:53,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/26a74d6f8d094bafbd59b14f93a051c0 as 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/26a74d6f8d094bafbd59b14f93a051c0 2024-11-14T19:55:53,138 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/26a74d6f8d094bafbd59b14f93a051c0, entries=7, sequenceid=11, filesize=12.2 K 2024-11-14T19:55:53,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 229a9f60e73dc7c25a0778695d1fe42e in 40ms, sequenceid=11, compaction requested=false 2024-11-14T19:55:53,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 229a9f60e73dc7c25a0778695d1fe42e: 2024-11-14T19:55:53,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:53,141 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 229a9f60e73dc7c25a0778695d1fe42e 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-11-14T19:55:53,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/c59071ae55554ca09369ba3519e495bd is 1080, key is row0008/info:/1731614153101/Put/seqid=0 2024-11-14T19:55:53,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741838_1014 (size=25453) 2024-11-14T19:55:53,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741838_1014 (size=25453) 2024-11-14T19:55:53,152 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/c59071ae55554ca09369ba3519e495bd 2024-11-14T19:55:53,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/c59071ae55554ca09369ba3519e495bd as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/c59071ae55554ca09369ba3519e495bd 2024-11-14T19:55:53,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/c59071ae55554ca09369ba3519e495bd, entries=19, sequenceid=33, filesize=24.9 K 2024-11-14T19:55:53,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=6.30 KB/6456 for 229a9f60e73dc7c25a0778695d1fe42e in 24ms, sequenceid=33, compaction requested=false 2024-11-14T19:55:53,165 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 229a9f60e73dc7c25a0778695d1fe42e: 2024-11-14T19:55:53,165 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=37.1 K, sizeToCheck=16.0 K 2024-11-14T19:55:53,165 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:55:53,165 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/c59071ae55554ca09369ba3519e495bd because midkey is the same as first or last row 2024-11-14T19:55:53,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:53,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:54,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:54,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:55:55,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:55,161 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 229a9f60e73dc7c25a0778695d1fe42e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T19:55:55,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/51b4a1fd1f21446c91a8c62154a363c2 is 1080, key is row0027/info:/1731614153142/Put/seqid=0 2024-11-14T19:55:55,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741839_1015 (size=12509) 2024-11-14T19:55:55,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741839_1015 (size=12509) 2024-11-14T19:55:55,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/51b4a1fd1f21446c91a8c62154a363c2 2024-11-14T19:55:55,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/51b4a1fd1f21446c91a8c62154a363c2 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/51b4a1fd1f21446c91a8c62154a363c2 2024-11-14T19:55:55,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/51b4a1fd1f21446c91a8c62154a363c2, entries=7, sequenceid=43, filesize=12.2 K 2024-11-14T19:55:55,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 229a9f60e73dc7c25a0778695d1fe42e in 27ms, sequenceid=43, compaction requested=true 2024-11-14T19:55:55,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 229a9f60e73dc7c25a0778695d1fe42e: 2024-11-14T19:55:55,187 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-11-14T19:55:55,187 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:55:55,188 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/c59071ae55554ca09369ba3519e495bd because midkey is the same as first or last row 2024-11-14T19:55:55,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 229a9f60e73dc7c25a0778695d1fe42e:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-14T19:55:55,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:55:55,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:55:55,188 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T19:55:55,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 229a9f60e73dc7c25a0778695d1fe42e 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-14T19:55:55,190 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T19:55:55,190 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1541): 229a9f60e73dc7c25a0778695d1fe42e/info is initiating minor compaction (all files) 2024-11-14T19:55:55,190 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 229a9f60e73dc7c25a0778695d1fe42e/info in TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 2024-11-14T19:55:55,190 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/26a74d6f8d094bafbd59b14f93a051c0, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/c59071ae55554ca09369ba3519e495bd, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/51b4a1fd1f21446c91a8c62154a363c2] into tmpdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp, totalSize=49.3 K 2024-11-14T19:55:55,191 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 26a74d6f8d094bafbd59b14f93a051c0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731614153084 2024-11-14T19:55:55,191 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting c59071ae55554ca09369ba3519e495bd, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=33, earliestPutTs=1731614153101 2024-11-14T19:55:55,192 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 51b4a1fd1f21446c91a8c62154a363c2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731614153142 2024-11-14T19:55:55,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/7b78065db2ba471fbca652ca186b54d5 is 1080, key is row0034/info:/1731614155162/Put/seqid=0 
2024-11-14T19:55:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741840_1016 (size=16817) 2024-11-14T19:55:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741840_1016 (size=16817) 2024-11-14T19:55:55,219 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 229a9f60e73dc7c25a0778695d1fe42e#info#compaction#58 average throughput is 11.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:55:55,220 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/a66f5fda58ba4efb82941cb6cf292917 is 1080, key is row0001/info:/1731614153084/Put/seqid=0 2024-11-14T19:55:55,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/7b78065db2ba471fbca652ca186b54d5 2024-11-14T19:55:55,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741841_1017 (size=40670) 2024-11-14T19:55:55,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741841_1017 (size=40670) 2024-11-14T19:55:55,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/7b78065db2ba471fbca652ca186b54d5 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/7b78065db2ba471fbca652ca186b54d5 2024-11-14T19:55:55,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/7b78065db2ba471fbca652ca186b54d5, entries=11, sequenceid=57, filesize=16.4 K 2024-11-14T19:55:55,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=229a9f60e73dc7c25a0778695d1fe42e, server=867b237d0fa7,36939,1731614141870 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T19:55:55,236 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/a66f5fda58ba4efb82941cb6cf292917 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/a66f5fda58ba4efb82941cb6cf292917 2024-11-14T19:55:55,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=18.91 KB/19368 for 229a9f60e73dc7c25a0778695d1fe42e in 49ms, sequenceid=57, compaction requested=false 2024-11-14T19:55:55,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 229a9f60e73dc7c25a0778695d1fe42e: 2024-11-14T19:55:55,238 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.7 K, sizeToCheck=16.0 K 2024-11-14T19:55:55,238 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:55:55,238 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/c59071ae55554ca09369ba3519e495bd because midkey is the same as first or last row 2024-11-14T19:55:55,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43800 deadline: 1731614165234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=229a9f60e73dc7c25a0778695d1fe42e, server=867b237d0fa7,36939,1731614141870 2024-11-14T19:55:55,242 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., hostname=867b237d0fa7,36939,1731614141870, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., hostname=867b237d0fa7,36939,1731614141870, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=229a9f60e73dc7c25a0778695d1fe42e, server=867b237d0fa7,36939,1731614141870 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T19:55:55,243 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., hostname=867b237d0fa7,36939,1731614141870, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=229a9f60e73dc7c25a0778695d1fe42e, server=867b237d0fa7,36939,1731614141870 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T19:55:55,243 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., hostname=867b237d0fa7,36939,1731614141870, seqNum=2 because the exception is null or not the one we care about 2024-11-14T19:55:55,244 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 229a9f60e73dc7c25a0778695d1fe42e/info of 229a9f60e73dc7c25a0778695d1fe42e into a66f5fda58ba4efb82941cb6cf292917(size=39.7 K), total size for store is 56.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T19:55:55,244 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 229a9f60e73dc7c25a0778695d1fe42e: 2024-11-14T19:55:55,244 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., storeName=229a9f60e73dc7c25a0778695d1fe42e/info, priority=13, startTime=1731614155188; duration=0sec 2024-11-14T19:55:55,244 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-14T19:55:55,244 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:55:55,244 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/a66f5fda58ba4efb82941cb6cf292917 because midkey is the same as first or last row 2024-11-14T19:55:55,244 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-14T19:55:55,244 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:55:55,244 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/a66f5fda58ba4efb82941cb6cf292917 because midkey is the same as first or last row 2024-11-14T19:55:55,245 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-14T19:55:55,245 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:55:55,245 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/a66f5fda58ba4efb82941cb6cf292917 because midkey is the same as first or last row 2024-11-14T19:55:55,245 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:55:55,245 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 229a9f60e73dc7c25a0778695d1fe42e:info 2024-11-14T19:55:55,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:55,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:56,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:56,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:57,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:57,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:58,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:58,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:59,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:55:59,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:00,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:00,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:01,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:01,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:02,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:02,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:03,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:03,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:04,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:04,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:05,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:56:05,339 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 229a9f60e73dc7c25a0778695d1fe42e 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-11-14T19:56:05,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/7b253c8bcb8442bd83962c6808fd5b70 is 1080, key is row0045/info:/1731614155190/Put/seqid=0 2024-11-14T19:56:05,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741842_1018 (size=25453) 2024-11-14T19:56:05,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741842_1018 (size=25453) 2024-11-14T19:56:05,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/7b253c8bcb8442bd83962c6808fd5b70 2024-11-14T19:56:05,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/7b253c8bcb8442bd83962c6808fd5b70 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/7b253c8bcb8442bd83962c6808fd5b70 2024-11-14T19:56:05,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/7b253c8bcb8442bd83962c6808fd5b70, entries=19, sequenceid=80, filesize=24.9 K 2024-11-14T19:56:05,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=1.05 KB/1076 for 229a9f60e73dc7c25a0778695d1fe42e in 32ms, sequenceid=80, compaction requested=true 2024-11-14T19:56:05,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 229a9f60e73dc7c25a0778695d1fe42e: 2024-11-14T19:56:05,372 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.0 K, sizeToCheck=16.0 K 2024-11-14T19:56:05,372 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:56:05,372 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/a66f5fda58ba4efb82941cb6cf292917 because midkey is the same as first or last row 2024-11-14T19:56:05,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 229a9f60e73dc7c25a0778695d1fe42e:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T19:56:05,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:05,372 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T19:56:05,373 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 82940 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T19:56:05,373 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1541): 229a9f60e73dc7c25a0778695d1fe42e/info is initiating minor compaction (all files) 2024-11-14T19:56:05,373 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 229a9f60e73dc7c25a0778695d1fe42e/info in TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 2024-11-14T19:56:05,373 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/a66f5fda58ba4efb82941cb6cf292917, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/7b78065db2ba471fbca652ca186b54d5, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/7b253c8bcb8442bd83962c6808fd5b70] into tmpdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp, totalSize=81.0 K 2024-11-14T19:56:05,374 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting a66f5fda58ba4efb82941cb6cf292917, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731614153084 2024-11-14T19:56:05,374 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7b78065db2ba471fbca652ca186b54d5, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1731614155162 2024-11-14T19:56:05,374 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7b253c8bcb8442bd83962c6808fd5b70, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1731614155190 2024-11-14T19:56:05,388 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 229a9f60e73dc7c25a0778695d1fe42e#info#compaction#60 average throughput is 21.55 
MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:56:05,389 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/ad4bd4d6ea6c429a9d3c4963a6932f2a is 1080, key is row0001/info:/1731614153084/Put/seqid=0 2024-11-14T19:56:05,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741843_1019 (size=73224) 2024-11-14T19:56:05,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741843_1019 (size=73224) 2024-11-14T19:56:05,400 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/ad4bd4d6ea6c429a9d3c4963a6932f2a as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/ad4bd4d6ea6c429a9d3c4963a6932f2a 2024-11-14T19:56:05,406 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 229a9f60e73dc7c25a0778695d1fe42e/info of 229a9f60e73dc7c25a0778695d1fe42e into ad4bd4d6ea6c429a9d3c4963a6932f2a(size=71.5 K), total size for store is 71.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T19:56:05,406 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 229a9f60e73dc7c25a0778695d1fe42e: 2024-11-14T19:56:05,407 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., storeName=229a9f60e73dc7c25a0778695d1fe42e/info, priority=13, startTime=1731614165372; duration=0sec 2024-11-14T19:56:05,407 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-14T19:56:05,407 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:56:05,407 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-14T19:56:05,407 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:56:05,407 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-14T19:56:05,407 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T19:56:05,408 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting 
TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:05,408 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:05,408 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 229a9f60e73dc7c25a0778695d1fe42e:info 2024-11-14T19:56:05,409 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44359 {}] assignment.AssignmentManager(1355): Split request from 867b237d0fa7,36939,1731614141870, parent={ENCODED => 229a9f60e73dc7c25a0778695d1fe42e, NAME => 'TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-14T19:56:05,415 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44359 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=867b237d0fa7,36939,1731614141870 2024-11-14T19:56:05,419 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44359 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=229a9f60e73dc7c25a0778695d1fe42e, daughterA=b358000d261efa4ae5748383aa975bac, daughterB=31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:05,420 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=229a9f60e73dc7c25a0778695d1fe42e, daughterA=b358000d261efa4ae5748383aa975bac, daughterB=31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:05,420 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=229a9f60e73dc7c25a0778695d1fe42e, daughterA=b358000d261efa4ae5748383aa975bac, daughterB=31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:05,420 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=229a9f60e73dc7c25a0778695d1fe42e, daughterA=b358000d261efa4ae5748383aa975bac, daughterB=31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:05,427 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=229a9f60e73dc7c25a0778695d1fe42e, UNASSIGN}] 2024-11-14T19:56:05,429 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=229a9f60e73dc7c25a0778695d1fe42e, UNASSIGN 2024-11-14T19:56:05,431 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=229a9f60e73dc7c25a0778695d1fe42e, regionState=CLOSING, regionLocation=867b237d0fa7,36939,1731614141870 2024-11-14T19:56:05,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake 
up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=229a9f60e73dc7c25a0778695d1fe42e, UNASSIGN because future has completed 2024-11-14T19:56:05,433 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-14T19:56:05,434 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 229a9f60e73dc7c25a0778695d1fe42e, server=867b237d0fa7,36939,1731614141870}] 2024-11-14T19:56:05,591 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:56:05,591 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-14T19:56:05,592 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 229a9f60e73dc7c25a0778695d1fe42e, disabling compactions & flushes 2024-11-14T19:56:05,592 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 2024-11-14T19:56:05,592 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 2024-11-14T19:56:05,592 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. after waiting 0 ms 2024-11-14T19:56:05,592 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 
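The records above show HBase's usual flush → compact → split pipeline on a region that has outgrown its split check: the ~20 KB memstore flush adds a third HFile, ExploringCompactionPolicy folds the three files (81.0 K) into one, and CompactSplit then asks the master to split the region at row0062, which starts SplitTableRegionProcedure pid=7 and the close of the parent region seen here. The sketch below is a minimal client-side illustration, not code from this test, of how the same flush, compaction, and split can be requested through the public Admin API; the table name and split key are the ones logged above, while the tiny thresholds (sizeToCheck=16.0 K, ~20 KB flushes) come from the test's own server-side configuration, which is not visible in this excerpt.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushCompactSplitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);                           // write memstores out as HFiles
      admin.compact(table);                         // request a (minor) compaction
      admin.split(table, Bytes.toBytes("row0062")); // split at an explicit key (the splitKey logged above)
    }
  }
}
```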
2024-11-14T19:56:05,592 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 229a9f60e73dc7c25a0778695d1fe42e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T19:56:05,596 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/5d8b7911a272442384de9763a52e4b66 is 1080, key is row0064/info:/1731614165341/Put/seqid=0 2024-11-14T19:56:05,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741844_1020 (size=6033) 2024-11-14T19:56:05,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741844_1020 (size=6033) 2024-11-14T19:56:05,602 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/5d8b7911a272442384de9763a52e4b66 2024-11-14T19:56:05,608 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/.tmp/info/5d8b7911a272442384de9763a52e4b66 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/5d8b7911a272442384de9763a52e4b66 2024-11-14T19:56:05,614 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/5d8b7911a272442384de9763a52e4b66, entries=1, sequenceid=85, filesize=5.9 K 2024-11-14T19:56:05,615 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 229a9f60e73dc7c25a0778695d1fe42e in 23ms, sequenceid=85, compaction requested=false 2024-11-14T19:56:05,616 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/26a74d6f8d094bafbd59b14f93a051c0, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/c59071ae55554ca09369ba3519e495bd, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/a66f5fda58ba4efb82941cb6cf292917, 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/51b4a1fd1f21446c91a8c62154a363c2, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/7b78065db2ba471fbca652ca186b54d5, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/7b253c8bcb8442bd83962c6808fd5b70] to archive 2024-11-14T19:56:05,617 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T19:56:05,619 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/26a74d6f8d094bafbd59b14f93a051c0 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/26a74d6f8d094bafbd59b14f93a051c0 2024-11-14T19:56:05,621 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/c59071ae55554ca09369ba3519e495bd to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/c59071ae55554ca09369ba3519e495bd 2024-11-14T19:56:05,622 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/a66f5fda58ba4efb82941cb6cf292917 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/a66f5fda58ba4efb82941cb6cf292917 2024-11-14T19:56:05,623 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/51b4a1fd1f21446c91a8c62154a363c2 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/51b4a1fd1f21446c91a8c62154a363c2 2024-11-14T19:56:05,624 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/7b78065db2ba471fbca652ca186b54d5 to 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/7b78065db2ba471fbca652ca186b54d5 2024-11-14T19:56:05,625 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/7b253c8bcb8442bd83962c6808fd5b70 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/7b253c8bcb8442bd83962c6808fd5b70 2024-11-14T19:56:05,632 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=1 2024-11-14T19:56:05,632 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 2024-11-14T19:56:05,633 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 229a9f60e73dc7c25a0778695d1fe42e: Waiting for close lock at 1731614165592Running coprocessor pre-close hooks at 1731614165592Disabling compacts and flushes for region at 1731614165592Disabling writes for close at 1731614165592Obtaining lock to block concurrent updates at 1731614165592Preparing flush snapshotting stores in 229a9f60e73dc7c25a0778695d1fe42e at 1731614165592Finished memstore snapshotting TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731614165593 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 
at 1731614165593Flushing 229a9f60e73dc7c25a0778695d1fe42e/info: creating writer at 1731614165593Flushing 229a9f60e73dc7c25a0778695d1fe42e/info: appending metadata at 1731614165596 (+3 ms)Flushing 229a9f60e73dc7c25a0778695d1fe42e/info: closing flushed file at 1731614165596Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bf30444: reopening flushed file at 1731614165607 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 229a9f60e73dc7c25a0778695d1fe42e in 23ms, sequenceid=85, compaction requested=false at 1731614165615 (+8 ms)Writing region close event to WAL at 1731614165628 (+13 ms)Running coprocessor post-close hooks at 1731614165632 (+4 ms)Closed at 1731614165632 2024-11-14T19:56:05,635 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:56:05,635 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=229a9f60e73dc7c25a0778695d1fe42e, regionState=CLOSED 2024-11-14T19:56:05,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 229a9f60e73dc7c25a0778695d1fe42e, server=867b237d0fa7,36939,1731614141870 because future has completed 2024-11-14T19:56:05,640 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-14T19:56:05,640 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 229a9f60e73dc7c25a0778695d1fe42e, server=867b237d0fa7,36939,1731614141870 in 205 msec 2024-11-14T19:56:05,642 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-14T19:56:05,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=229a9f60e73dc7c25a0778695d1fe42e, UNASSIGN in 213 msec 2024-11-14T19:56:05,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:05,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:56:05,652 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:56:05,655 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=229a9f60e73dc7c25a0778695d1fe42e, threads=2 2024-11-14T19:56:05,657 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/5d8b7911a272442384de9763a52e4b66 for region: 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:56:05,658 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/ad4bd4d6ea6c429a9d3c4963a6932f2a for region: 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:56:05,667 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/5d8b7911a272442384de9763a52e4b66, top=true 2024-11-14T19:56:05,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741845_1021 (size=27) 2024-11-14T19:56:05,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741845_1021 (size=27) 2024-11-14T19:56:05,675 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/TestLogRolling-testLogRolling=229a9f60e73dc7c25a0778695d1fe42e-5d8b7911a272442384de9763a52e4b66 for child: 31c109b10941d0ea69361a85692887e8, parent: 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:56:05,675 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/5d8b7911a272442384de9763a52e4b66 for region: 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:56:05,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741846_1022 (size=27) 2024-11-14T19:56:05,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741846_1022 (size=27) 2024-11-14T19:56:05,683 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/ad4bd4d6ea6c429a9d3c4963a6932f2a for region: 229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:56:05,685 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 
229a9f60e73dc7c25a0778695d1fe42e Daughter A: [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac/info/ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e] storefiles, Daughter B: [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/TestLogRolling-testLogRolling=229a9f60e73dc7c25a0778695d1fe42e-5d8b7911a272442384de9763a52e4b66, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e] storefiles. 2024-11-14T19:56:05,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741847_1023 (size=71) 2024-11-14T19:56:05,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741847_1023 (size=71) 2024-11-14T19:56:05,695 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:56:05,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741848_1024 (size=71) 2024-11-14T19:56:05,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741848_1024 (size=71) 2024-11-14T19:56:05,708 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:56:05,719 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-14T19:56:05,721 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-14T19:56:05,724 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731614165724"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731614165724"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731614165724"}]},"ts":"1731614165724"} 2024-11-14T19:56:05,725 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731614165724"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731614165724"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731614165724"}]},"ts":"1731614165724"} 2024-11-14T19:56:05,725 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731614165724"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731614165724"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731614165724"}]},"ts":"1731614165724"} 2024-11-14T19:56:05,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b358000d261efa4ae5748383aa975bac, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31c109b10941d0ea69361a85692887e8, ASSIGN}] 2024-11-14T19:56:05,746 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b358000d261efa4ae5748383aa975bac, ASSIGN 2024-11-14T19:56:05,746 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31c109b10941d0ea69361a85692887e8, ASSIGN 2024-11-14T19:56:05,747 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b358000d261efa4ae5748383aa975bac, ASSIGN; state=SPLITTING_NEW, location=867b237d0fa7,36939,1731614141870; forceNewPlan=false, retain=false 2024-11-14T19:56:05,747 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31c109b10941d0ea69361a85692887e8, ASSIGN; state=SPLITTING_NEW, location=867b237d0fa7,36939,1731614141870; forceNewPlan=false, retain=false 2024-11-14T19:56:05,898 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=31c109b10941d0ea69361a85692887e8, regionState=OPENING, regionLocation=867b237d0fa7,36939,1731614141870 2024-11-14T19:56:05,898 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=b358000d261efa4ae5748383aa975bac, regionState=OPENING, regionLocation=867b237d0fa7,36939,1731614141870 2024-11-14T19:56:05,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b358000d261efa4ae5748383aa975bac, ASSIGN because future has completed 2024-11-14T19:56:05,901 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure b358000d261efa4ae5748383aa975bac, server=867b237d0fa7,36939,1731614141870}] 2024-11-14T19:56:05,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, 
state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31c109b10941d0ea69361a85692887e8, ASSIGN because future has completed 2024-11-14T19:56:05,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870}] 2024-11-14T19:56:06,058 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac. 2024-11-14T19:56:06,058 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => b358000d261efa4ae5748383aa975bac, NAME => 'TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-14T19:56:06,059 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b358000d261efa4ae5748383aa975bac 2024-11-14T19:56:06,059 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:56:06,059 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for b358000d261efa4ae5748383aa975bac 2024-11-14T19:56:06,059 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for b358000d261efa4ae5748383aa975bac 2024-11-14T19:56:06,061 INFO [StoreOpener-b358000d261efa4ae5748383aa975bac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b358000d261efa4ae5748383aa975bac 2024-11-14T19:56:06,062 INFO [StoreOpener-b358000d261efa4ae5748383aa975bac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b358000d261efa4ae5748383aa975bac columnFamilyName info 2024-11-14T19:56:06,062 DEBUG [StoreOpener-b358000d261efa4ae5748383aa975bac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:56:06,076 DEBUG [StoreOpener-b358000d261efa4ae5748383aa975bac-1 {}] 
regionserver.StoreEngine(278): loaded hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac/info/ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e->hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/ad4bd4d6ea6c429a9d3c4963a6932f2a-bottom 2024-11-14T19:56:06,077 INFO [StoreOpener-b358000d261efa4ae5748383aa975bac-1 {}] regionserver.HStore(327): Store=b358000d261efa4ae5748383aa975bac/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:56:06,077 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for b358000d261efa4ae5748383aa975bac 2024-11-14T19:56:06,078 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac 2024-11-14T19:56:06,079 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac 2024-11-14T19:56:06,080 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for b358000d261efa4ae5748383aa975bac 2024-11-14T19:56:06,080 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for b358000d261efa4ae5748383aa975bac 2024-11-14T19:56:06,081 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for b358000d261efa4ae5748383aa975bac 2024-11-14T19:56:06,082 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened b358000d261efa4ae5748383aa975bac; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826641, jitterRate=0.05112847685813904}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T19:56:06,082 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b358000d261efa4ae5748383aa975bac 2024-11-14T19:56:06,083 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for b358000d261efa4ae5748383aa975bac: Running coprocessor pre-open hook at 1731614166059Writing region info on filesystem at 1731614166059Initializing all the Stores at 1731614166060 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614166060Cleaning up temporary data from old regions at 1731614166080 (+20 ms)Running coprocessor post-open hooks at 1731614166082 (+2 ms)Region opened successfully at 1731614166083 (+1 ms) 2024-11-14T19:56:06,084 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac., pid=12, masterSystemTime=1731614166053 2024-11-14T19:56:06,084 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store b358000d261efa4ae5748383aa975bac:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T19:56:06,084 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:06,084 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-14T19:56:06,085 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac. 2024-11-14T19:56:06,085 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1541): b358000d261efa4ae5748383aa975bac/info is initiating minor compaction (all files) 2024-11-14T19:56:06,085 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b358000d261efa4ae5748383aa975bac/info in TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac. 2024-11-14T19:56:06,085 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac/info/ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e->hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/ad4bd4d6ea6c429a9d3c4963a6932f2a-bottom] into tmpdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac/.tmp, totalSize=71.5 K 2024-11-14T19:56:06,086 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1731614153084 2024-11-14T19:56:06,086 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac. 
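Opening daughter b358000d261efa4ae5748383aa975bac shows how the split avoids rewriting data: its store loads ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e, a Reference file pointing at the bottom half of the parent's compacted HFile (the "-bottom" suffix above), while the other daughter gets the top half plus an HFileLink for the small final flush file, and each daughter immediately queues a compaction to rewrite those references into ordinary HFiles. The snippet below is an assumed, illustrative client check (not part of this test) that the parent has been replaced in hbase:meta by two daughters with the key ranges ['', row0062) and [row0062, '') once the split procedure finishes.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ShowDaughterRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(table)) {
      // After the split, the parent is gone from the client's view and the two
      // daughters ['', row0062) and [row0062, '') are listed instead.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName()
            + " [" + Bytes.toStringBinary(loc.getRegion().getStartKey())
            + ", " + Bytes.toStringBinary(loc.getRegion().getEndKey()) + ") on "
            + loc.getServerName());
      }
    }
  }
}
```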
2024-11-14T19:56:06,086 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac. 2024-11-14T19:56:06,087 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 2024-11-14T19:56:06,087 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 31c109b10941d0ea69361a85692887e8, NAME => 'TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-14T19:56:06,087 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:06,087 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:56:06,087 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:06,087 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:06,087 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=b358000d261efa4ae5748383aa975bac, regionState=OPEN, openSeqNum=89, regionLocation=867b237d0fa7,36939,1731614141870 2024-11-14T19:56:06,088 INFO [StoreOpener-31c109b10941d0ea69361a85692887e8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:06,089 INFO [StoreOpener-31c109b10941d0ea69361a85692887e8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 31c109b10941d0ea69361a85692887e8 columnFamilyName info 2024-11-14T19:56:06,089 DEBUG [StoreOpener-31c109b10941d0ea69361a85692887e8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:56:06,089 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-14T19:56:06,090 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-14T19:56:06,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure b358000d261efa4ae5748383aa975bac, server=867b237d0fa7,36939,1731614141870 because future has completed 2024-11-14T19:56:06,090 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-14T19:56:06,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-14T19:56:06,094 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure b358000d261efa4ae5748383aa975bac, server=867b237d0fa7,36939,1731614141870 in 190 msec 2024-11-14T19:56:06,095 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b358000d261efa4ae5748383aa975bac, ASSIGN in 349 msec 2024-11-14T19:56:06,107 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/.tmp/info/f141c170baf6458889cb48cbebef754c is 193, key is TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8./info:regioninfo/1731614165898/Put/seqid=0 2024-11-14T19:56:06,107 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b358000d261efa4ae5748383aa975bac#info#compaction#62 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:56:06,108 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac/.tmp/info/fdb4e4e6ab0f40578f090c2a481963e2 is 1080, key is row0001/info:/1731614153084/Put/seqid=0 2024-11-14T19:56:06,114 DEBUG [StoreOpener-31c109b10941d0ea69361a85692887e8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/TestLogRolling-testLogRolling=229a9f60e73dc7c25a0778695d1fe42e-5d8b7911a272442384de9763a52e4b66 2024-11-14T19:56:06,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741850_1026 (size=70862) 2024-11-14T19:56:06,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741850_1026 (size=70862) 2024-11-14T19:56:06,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741849_1025 (size=9847) 2024-11-14T19:56:06,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741849_1025 (size=9847) 2024-11-14T19:56:06,122 DEBUG [StoreOpener-31c109b10941d0ea69361a85692887e8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e->hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/ad4bd4d6ea6c429a9d3c4963a6932f2a-top 2024-11-14T19:56:06,122 INFO [StoreOpener-31c109b10941d0ea69361a85692887e8-1 {}] regionserver.HStore(327): Store=31c109b10941d0ea69361a85692887e8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:56:06,123 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:06,123 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:06,124 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac/.tmp/info/fdb4e4e6ab0f40578f090c2a481963e2 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac/info/fdb4e4e6ab0f40578f090c2a481963e2 2024-11-14T19:56:06,125 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, 
pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:06,125 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:06,125 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:06,127 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:06,128 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 31c109b10941d0ea69361a85692887e8; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743169, jitterRate=-0.055012404918670654}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T19:56:06,128 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:06,128 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 31c109b10941d0ea69361a85692887e8: Running coprocessor pre-open hook at 1731614166087Writing region info on filesystem at 1731614166087Initializing all the Stores at 1731614166088 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614166088Cleaning up temporary data from old regions at 1731614166125 (+37 ms)Running coprocessor post-open hooks at 1731614166128 (+3 ms)Region opened successfully at 1731614166128 2024-11-14T19:56:06,129 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., pid=13, masterSystemTime=1731614166053 2024-11-14T19:56:06,129 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 31c109b10941d0ea69361a85692887e8:info, priority=-2147483648, current under compaction store size is 2 2024-11-14T19:56:06,129 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:06,129 DEBUG [RS:0;867b237d0fa7:36939-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-14T19:56:06,130 INFO [RS:0;867b237d0fa7:36939-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding 
Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 2024-11-14T19:56:06,130 DEBUG [RS:0;867b237d0fa7:36939-longCompactions-0 {}] regionserver.HStore(1541): 31c109b10941d0ea69361a85692887e8/info is initiating minor compaction (all files) 2024-11-14T19:56:06,130 INFO [RS:0;867b237d0fa7:36939-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 31c109b10941d0ea69361a85692887e8/info in TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 2024-11-14T19:56:06,131 INFO [RS:0;867b237d0fa7:36939-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e->hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/ad4bd4d6ea6c429a9d3c4963a6932f2a-top, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/TestLogRolling-testLogRolling=229a9f60e73dc7c25a0778695d1fe42e-5d8b7911a272442384de9763a52e4b66] into tmpdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp, totalSize=77.4 K 2024-11-14T19:56:06,131 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in b358000d261efa4ae5748383aa975bac/info of b358000d261efa4ae5748383aa975bac into fdb4e4e6ab0f40578f090c2a481963e2(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T19:56:06,131 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b358000d261efa4ae5748383aa975bac: 2024-11-14T19:56:06,131 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac., storeName=b358000d261efa4ae5748383aa975bac/info, priority=15, startTime=1731614166084; duration=0sec 2024-11-14T19:56:06,131 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:06,131 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b358000d261efa4ae5748383aa975bac:info 2024-11-14T19:56:06,131 DEBUG [RS:0;867b237d0fa7:36939-longCompactions-0 {}] compactions.Compactor(225): Compacting ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731614153084 2024-11-14T19:56:06,132 DEBUG [RS:0;867b237d0fa7:36939-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=229a9f60e73dc7c25a0778695d1fe42e-5d8b7911a272442384de9763a52e4b66, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731614165341 2024-11-14T19:56:06,132 DEBUG [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 2024-11-14T19:56:06,132 INFO [RS_OPEN_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 
2024-11-14T19:56:06,133 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=31c109b10941d0ea69361a85692887e8, regionState=OPEN, openSeqNum=89, regionLocation=867b237d0fa7,36939,1731614141870 2024-11-14T19:56:06,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870 because future has completed 2024-11-14T19:56:06,142 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-14T19:56:06,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870 in 238 msec 2024-11-14T19:56:06,145 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-14T19:56:06,145 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=31c109b10941d0ea69361a85692887e8, ASSIGN in 398 msec 2024-11-14T19:56:06,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=229a9f60e73dc7c25a0778695d1fe42e, daughterA=b358000d261efa4ae5748383aa975bac, daughterB=31c109b10941d0ea69361a85692887e8 in 730 msec 2024-11-14T19:56:06,156 INFO [RS:0;867b237d0fa7:36939-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 31c109b10941d0ea69361a85692887e8#info#compaction#64 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:56:06,156 DEBUG [RS:0;867b237d0fa7:36939-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/c0a71bae47ae497280fdfabbbf96ae1c is 1080, key is row0062/info:/1731614155231/Put/seqid=0 2024-11-14T19:56:06,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741851_1027 (size=8359) 2024-11-14T19:56:06,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741851_1027 (size=8359) 2024-11-14T19:56:06,168 DEBUG [RS:0;867b237d0fa7:36939-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/c0a71bae47ae497280fdfabbbf96ae1c as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/c0a71bae47ae497280fdfabbbf96ae1c 2024-11-14T19:56:06,175 INFO [RS:0;867b237d0fa7:36939-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in 31c109b10941d0ea69361a85692887e8/info of 31c109b10941d0ea69361a85692887e8 into c0a71bae47ae497280fdfabbbf96ae1c(size=8.2 K), total size for store is 8.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T19:56:06,175 DEBUG [RS:0;867b237d0fa7:36939-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:06,175 INFO [RS:0;867b237d0fa7:36939-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., storeName=31c109b10941d0ea69361a85692887e8/info, priority=14, startTime=1731614166129; duration=0sec 2024-11-14T19:56:06,175 DEBUG [RS:0;867b237d0fa7:36939-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:06,175 DEBUG [RS:0;867b237d0fa7:36939-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 31c109b10941d0ea69361a85692887e8:info 2024-11-14T19:56:06,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/.tmp/info/f141c170baf6458889cb48cbebef754c 2024-11-14T19:56:06,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/.tmp/ns/f03411671557470e8e8c537b2e83dd37 is 43, key is default/ns:d/1731614142920/Put/seqid=0 2024-11-14T19:56:06,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741852_1028 (size=5153) 2024-11-14T19:56:06,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741852_1028 (size=5153) 2024-11-14T19:56:06,546 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/.tmp/ns/f03411671557470e8e8c537b2e83dd37 2024-11-14T19:56:06,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/.tmp/table/d9d042a5928a444b951a3905eb774bae is 65, key is TestLogRolling-testLogRolling/table:state/1731614143397/Put/seqid=0 2024-11-14T19:56:06,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741853_1029 (size=5340) 2024-11-14T19:56:06,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741853_1029 (size=5340) 2024-11-14T19:56:06,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/.tmp/table/d9d042a5928a444b951a3905eb774bae 2024-11-14T19:56:06,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/.tmp/info/f141c170baf6458889cb48cbebef754c as 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/info/f141c170baf6458889cb48cbebef754c 2024-11-14T19:56:06,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/info/f141c170baf6458889cb48cbebef754c, entries=30, sequenceid=17, filesize=9.6 K 2024-11-14T19:56:06,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/.tmp/ns/f03411671557470e8e8c537b2e83dd37 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/ns/f03411671557470e8e8c537b2e83dd37 2024-11-14T19:56:06,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/ns/f03411671557470e8e8c537b2e83dd37, entries=2, sequenceid=17, filesize=5.0 K 2024-11-14T19:56:06,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/.tmp/table/d9d042a5928a444b951a3905eb774bae as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/table/d9d042a5928a444b951a3905eb774bae 2024-11-14T19:56:06,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/table/d9d042a5928a444b951a3905eb774bae, entries=2, sequenceid=17, filesize=5.2 K 2024-11-14T19:56:06,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 510ms, sequenceid=17, compaction requested=false 2024-11-14T19:56:06,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T19:56:06,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:06,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:56:07,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43800 deadline: 1731614177344, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. is not online on 867b237d0fa7,36939,1731614141870 2024-11-14T19:56:07,345 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., hostname=867b237d0fa7,36939,1731614141870, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., hostname=867b237d0fa7,36939,1731614141870, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. is not online on 867b237d0fa7,36939,1731614141870 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T19:56:07,345 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., hostname=867b237d0fa7,36939,1731614141870, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e. 
is not online on 867b237d0fa7,36939,1731614141870 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T19:56:07,345 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731614143017.229a9f60e73dc7c25a0778695d1fe42e., hostname=867b237d0fa7,36939,1731614141870, seqNum=2 from cache 2024-11-14T19:56:07,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:56:07,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:08,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:08,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:56:09,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:09,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:10,633 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,633 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,633 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,633 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:10,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:10,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,659 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,659 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:10,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,173 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T19:56:11,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,208 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,208 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,208 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:11,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:11,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:11,708 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T19:56:12,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:12,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:13,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:13,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:14,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:14,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:15,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:15,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:16,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:16,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:56:17,444 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89] 2024-11-14T19:56:17,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:17,458 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T19:56:17,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/20874f2d65ec439a95639bbbb9b84786 is 1080, key is row0065/info:/1731614177445/Put/seqid=0 2024-11-14T19:56:17,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741854_1030 (size=12509) 2024-11-14T19:56:17,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741854_1030 (size=12509) 2024-11-14T19:56:17,470 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/20874f2d65ec439a95639bbbb9b84786 2024-11-14T19:56:17,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/20874f2d65ec439a95639bbbb9b84786 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/20874f2d65ec439a95639bbbb9b84786 2024-11-14T19:56:17,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/20874f2d65ec439a95639bbbb9b84786, entries=7, sequenceid=99, filesize=12.2 K 2024-11-14T19:56:17,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 31c109b10941d0ea69361a85692887e8 in 29ms, sequenceid=99, compaction requested=false 2024-11-14T19:56:17,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:17,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:17,489 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-14T19:56:17,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/b6da87ad2af74c30b81fd4a41cc15a87 is 1080, key is row0072/info:/1731614177460/Put/seqid=0 2024-11-14T19:56:17,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741855_1031 (size=18987) 2024-11-14T19:56:17,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741855_1031 (size=18987) 2024-11-14T19:56:17,502 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/b6da87ad2af74c30b81fd4a41cc15a87 2024-11-14T19:56:17,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/b6da87ad2af74c30b81fd4a41cc15a87 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/b6da87ad2af74c30b81fd4a41cc15a87 2024-11-14T19:56:17,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/b6da87ad2af74c30b81fd4a41cc15a87, entries=13, sequenceid=115, filesize=18.5 K 2024-11-14T19:56:17,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for 31c109b10941d0ea69361a85692887e8 in 29ms, sequenceid=115, compaction requested=true 2024-11-14T19:56:17,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:17,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 31c109b10941d0ea69361a85692887e8:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T19:56:17,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:17,519 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T19:56:17,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:17,520 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39855 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T19:56:17,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-14T19:56:17,520 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] 
regionserver.HStore(1541): 31c109b10941d0ea69361a85692887e8/info is initiating minor compaction (all files) 2024-11-14T19:56:17,520 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 31c109b10941d0ea69361a85692887e8/info in TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 2024-11-14T19:56:17,521 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/c0a71bae47ae497280fdfabbbf96ae1c, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/20874f2d65ec439a95639bbbb9b84786, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/b6da87ad2af74c30b81fd4a41cc15a87] into tmpdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp, totalSize=38.9 K 2024-11-14T19:56:17,521 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting c0a71bae47ae497280fdfabbbf96ae1c, keycount=3, bloomtype=ROW, size=8.2 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731614155231 2024-11-14T19:56:17,522 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 20874f2d65ec439a95639bbbb9b84786, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731614177445 2024-11-14T19:56:17,523 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting b6da87ad2af74c30b81fd4a41cc15a87, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731614177460 2024-11-14T19:56:17,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/bc9b2f414d9d47d8b06ce555c9e9a39f is 1080, key is row0085/info:/1731614177491/Put/seqid=0 2024-11-14T19:56:17,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741856_1032 (size=17895) 2024-11-14T19:56:17,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741856_1032 (size=17895) 2024-11-14T19:56:17,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/bc9b2f414d9d47d8b06ce555c9e9a39f 2024-11-14T19:56:17,535 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 31c109b10941d0ea69361a85692887e8#info#compaction#70 average throughput is 23.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:56:17,535 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/487ca577e99d4ac4b4d85146c30ebd00 is 1080, key is row0062/info:/1731614155231/Put/seqid=0 2024-11-14T19:56:17,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/bc9b2f414d9d47d8b06ce555c9e9a39f as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/bc9b2f414d9d47d8b06ce555c9e9a39f 2024-11-14T19:56:17,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741857_1033 (size=30029) 2024-11-14T19:56:17,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/bc9b2f414d9d47d8b06ce555c9e9a39f, entries=12, sequenceid=130, filesize=17.5 K 2024-11-14T19:56:17,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741857_1033 (size=30029) 2024-11-14T19:56:17,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for 31c109b10941d0ea69361a85692887e8 in 26ms, sequenceid=130, compaction requested=false 2024-11-14T19:56:17,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:17,553 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/487ca577e99d4ac4b4d85146c30ebd00 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/487ca577e99d4ac4b4d85146c30ebd00 2024-11-14T19:56:17,560 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 31c109b10941d0ea69361a85692887e8/info of 31c109b10941d0ea69361a85692887e8 into 487ca577e99d4ac4b4d85146c30ebd00(size=29.3 K), total size for store is 46.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T19:56:17,560 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:17,560 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., storeName=31c109b10941d0ea69361a85692887e8/info, priority=13, startTime=1731614177519; duration=0sec 2024-11-14T19:56:17,560 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:17,560 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 31c109b10941d0ea69361a85692887e8:info 2024-11-14T19:56:17,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:56:17,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:18,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:18,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:56:19,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:19,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T19:56:19,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/5e5e0ed0cdd54973b5e91c21904d5c64 is 1080, key is row0097/info:/1731614179522/Put/seqid=0 2024-11-14T19:56:19,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741858_1034 (size=12516) 2024-11-14T19:56:19,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741858_1034 (size=12516) 2024-11-14T19:56:19,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/5e5e0ed0cdd54973b5e91c21904d5c64 2024-11-14T19:56:19,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/5e5e0ed0cdd54973b5e91c21904d5c64 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/5e5e0ed0cdd54973b5e91c21904d5c64 2024-11-14T19:56:19,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/5e5e0ed0cdd54973b5e91c21904d5c64, entries=7, sequenceid=141, filesize=12.2 K 2024-11-14T19:56:19,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 31c109b10941d0ea69361a85692887e8 in 26ms, sequenceid=141, compaction requested=true 2024-11-14T19:56:19,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:19,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 31c109b10941d0ea69361a85692887e8:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T19:56:19,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:19,564 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T19:56:19,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:19,565 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-14T19:56:19,565 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 60440 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T19:56:19,565 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1541): 31c109b10941d0ea69361a85692887e8/info is initiating minor compaction (all files) 2024-11-14T19:56:19,565 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 31c109b10941d0ea69361a85692887e8/info in TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 2024-11-14T19:56:19,566 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/487ca577e99d4ac4b4d85146c30ebd00, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/bc9b2f414d9d47d8b06ce555c9e9a39f, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/5e5e0ed0cdd54973b5e91c21904d5c64] into tmpdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp, totalSize=59.0 K 2024-11-14T19:56:19,566 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 487ca577e99d4ac4b4d85146c30ebd00, keycount=23, bloomtype=ROW, size=29.3 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731614155231 2024-11-14T19:56:19,566 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting bc9b2f414d9d47d8b06ce555c9e9a39f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1731614177491 2024-11-14T19:56:19,567 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5e5e0ed0cdd54973b5e91c21904d5c64, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1731614179522 2024-11-14T19:56:19,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/21cc67c8bd434f3aa49cc1052b81ba5f is 1080, key is row0104/info:/1731614179538/Put/seqid=0 2024-11-14T19:56:19,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741859_1035 (size=17906) 2024-11-14T19:56:19,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741859_1035 (size=17906) 2024-11-14T19:56:19,577 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=156 (bloomFilter=true), 
to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/21cc67c8bd434f3aa49cc1052b81ba5f 2024-11-14T19:56:19,581 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 31c109b10941d0ea69361a85692887e8#info#compaction#73 average throughput is 43.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:56:19,582 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/7b4865016211489d85a863ee96fb6eae is 1080, key is row0062/info:/1731614155231/Put/seqid=0 2024-11-14T19:56:19,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/21cc67c8bd434f3aa49cc1052b81ba5f as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/21cc67c8bd434f3aa49cc1052b81ba5f 2024-11-14T19:56:19,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/21cc67c8bd434f3aa49cc1052b81ba5f, entries=12, sequenceid=156, filesize=17.5 K 2024-11-14T19:56:19,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 31c109b10941d0ea69361a85692887e8 in 27ms, sequenceid=156, compaction requested=false 2024-11-14T19:56:19,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:19,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:19,593 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-14T19:56:19,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741860_1036 (size=50638) 2024-11-14T19:56:19,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741860_1036 (size=50638) 2024-11-14T19:56:19,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/da56aefd6b084ef187890663ec5539e5 is 1080, key is row0116/info:/1731614179567/Put/seqid=0 2024-11-14T19:56:19,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741861_1037 (size=16828) 2024-11-14T19:56:19,604 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/7b4865016211489d85a863ee96fb6eae as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/7b4865016211489d85a863ee96fb6eae 2024-11-14T19:56:19,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741861_1037 (size=16828) 2024-11-14T19:56:19,605 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/da56aefd6b084ef187890663ec5539e5 2024-11-14T19:56:19,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/da56aefd6b084ef187890663ec5539e5 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/da56aefd6b084ef187890663ec5539e5 2024-11-14T19:56:19,611 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 31c109b10941d0ea69361a85692887e8/info of 31c109b10941d0ea69361a85692887e8 into 7b4865016211489d85a863ee96fb6eae(size=49.5 K), total size for store is 66.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T19:56:19,611 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:19,611 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., storeName=31c109b10941d0ea69361a85692887e8/info, priority=13, startTime=1731614179564; duration=0sec 2024-11-14T19:56:19,612 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:19,612 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 31c109b10941d0ea69361a85692887e8:info 2024-11-14T19:56:19,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/da56aefd6b084ef187890663ec5539e5, entries=11, sequenceid=170, filesize=16.4 K 2024-11-14T19:56:19,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=2.10 KB/2152 for 31c109b10941d0ea69361a85692887e8 in 24ms, sequenceid=170, compaction requested=true 2024-11-14T19:56:19,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:19,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 31c109b10941d0ea69361a85692887e8:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T19:56:19,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:19,617 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T19:56:19,618 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85372 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T19:56:19,618 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1541): 31c109b10941d0ea69361a85692887e8/info is initiating minor compaction (all files) 2024-11-14T19:56:19,618 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 31c109b10941d0ea69361a85692887e8/info in TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 
2024-11-14T19:56:19,618 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/7b4865016211489d85a863ee96fb6eae, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/21cc67c8bd434f3aa49cc1052b81ba5f, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/da56aefd6b084ef187890663ec5539e5] into tmpdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp, totalSize=83.4 K 2024-11-14T19:56:19,619 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7b4865016211489d85a863ee96fb6eae, keycount=42, bloomtype=ROW, size=49.5 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1731614155231 2024-11-14T19:56:19,619 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 21cc67c8bd434f3aa49cc1052b81ba5f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731614179538 2024-11-14T19:56:19,620 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting da56aefd6b084ef187890663ec5539e5, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731614179567 2024-11-14T19:56:19,631 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 31c109b10941d0ea69361a85692887e8#info#compaction#75 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:56:19,632 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/26d8199017a1413c8c9a86d0da4983f5 is 1080, key is row0062/info:/1731614155231/Put/seqid=0 2024-11-14T19:56:19,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741862_1038 (size=75675) 2024-11-14T19:56:19,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741862_1038 (size=75675) 2024-11-14T19:56:19,642 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/26d8199017a1413c8c9a86d0da4983f5 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/26d8199017a1413c8c9a86d0da4983f5 2024-11-14T19:56:19,648 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 31c109b10941d0ea69361a85692887e8/info of 31c109b10941d0ea69361a85692887e8 into 26d8199017a1413c8c9a86d0da4983f5(size=73.9 K), total size for store is 73.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T19:56:19,648 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:19,648 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., storeName=31c109b10941d0ea69361a85692887e8/info, priority=13, startTime=1731614179617; duration=0sec 2024-11-14T19:56:19,648 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:19,648 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 31c109b10941d0ea69361a85692887e8:info 2024-11-14T19:56:19,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:19,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:20,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:20,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:21,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:21,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T19:56:21,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/32624ac50d69448ba98acd8b61448793 is 1080, key is row0127/info:/1731614179595/Put/seqid=0 2024-11-14T19:56:21,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741863_1039 (size=12516) 2024-11-14T19:56:21,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741863_1039 (size=12516) 2024-11-14T19:56:21,625 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/32624ac50d69448ba98acd8b61448793 2024-11-14T19:56:21,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/32624ac50d69448ba98acd8b61448793 as 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/32624ac50d69448ba98acd8b61448793 2024-11-14T19:56:21,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/32624ac50d69448ba98acd8b61448793, entries=7, sequenceid=182, filesize=12.2 K 2024-11-14T19:56:21,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 31c109b10941d0ea69361a85692887e8 in 29ms, sequenceid=182, compaction requested=false 2024-11-14T19:56:21,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:21,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:21,641 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-14T19:56:21,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/e5f18615a7cd4c10a19f43acb18ff4e9 is 1080, key is row0134/info:/1731614181611/Put/seqid=0 2024-11-14T19:56:21,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741864_1040 (size=20078) 2024-11-14T19:56:21,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741864_1040 (size=20078) 2024-11-14T19:56:21,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:21,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:56:21,659 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/e5f18615a7cd4c10a19f43acb18ff4e9 2024-11-14T19:56:21,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/e5f18615a7cd4c10a19f43acb18ff4e9 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/e5f18615a7cd4c10a19f43acb18ff4e9 2024-11-14T19:56:21,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/e5f18615a7cd4c10a19f43acb18ff4e9, entries=14, sequenceid=199, filesize=19.6 K 2024-11-14T19:56:21,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for 31c109b10941d0ea69361a85692887e8 in 48ms, sequenceid=199, compaction requested=true 2024-11-14T19:56:21,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:21,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 31c109b10941d0ea69361a85692887e8:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T19:56:21,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:21,690 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T19:56:21,691 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 108269 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T19:56:21,691 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1541): 31c109b10941d0ea69361a85692887e8/info is initiating minor compaction (all files) 2024-11-14T19:56:21,691 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 31c109b10941d0ea69361a85692887e8/info in TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 
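The Close-WAL-Writer-0 WARN stack traces that recur throughout this stretch of the log (roughly once per second per WAL file) all have the same shape: a helper calls isFileClosed reflectively, the underlying DFS client has already been shut down, and the checked IOException surfaces wrapped in an InvocationTargetException on every retry. The sketch below reproduces that pattern without any Hadoop dependency; FakeDfs and its method are stand-ins, not the real HBase or HDFS types.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Hadoop-free sketch of the retry-via-reflection pattern behind the repeated
// "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings.
public class LeaseRecoveryRetrySketch {

    // Stand-in for a filesystem client that has already been closed.
    public static class FakeDfs {
        private boolean closed = true;

        public boolean isFileClosed(String path) throws IOException {
            if (closed) {
                throw new IOException("Filesystem closed"); // mirrors the checkOpen() failure
            }
            return true;
        }
    }

    public static void main(String[] args) throws Exception {
        FakeDfs dfs = new FakeDfs();
        Method isFileClosed = FakeDfs.class.getMethod("isFileClosed", String.class);

        for (int attempt = 1; attempt <= 3; attempt++) {
            try {
                // Reflective call, as the lease-recovery helper does for older/newer HDFS APIs.
                isFileClosed.invoke(dfs, "/WALs/example-wal");
            } catch (InvocationTargetException e) {
                // The meaningful error is the wrapped cause, not the reflection wrapper.
                System.out.println("attempt " + attempt + " failed: " + e.getCause());
            }
            Thread.sleep(1000); // the log shows roughly one retry per second
        }
    }
}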
2024-11-14T19:56:21,691 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/26d8199017a1413c8c9a86d0da4983f5, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/32624ac50d69448ba98acd8b61448793, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/e5f18615a7cd4c10a19f43acb18ff4e9] into tmpdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp, totalSize=105.7 K 2024-11-14T19:56:21,692 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 26d8199017a1413c8c9a86d0da4983f5, keycount=65, bloomtype=ROW, size=73.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731614155231 2024-11-14T19:56:21,692 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 32624ac50d69448ba98acd8b61448793, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1731614179595 2024-11-14T19:56:21,692 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting e5f18615a7cd4c10a19f43acb18ff4e9, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1731614181611 2024-11-14T19:56:21,705 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 31c109b10941d0ea69361a85692887e8#info#compaction#78 average throughput is 44.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:56:21,706 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/ad9b629635cc4638bae9e7e482c0ac2a is 1080, key is row0062/info:/1731614155231/Put/seqid=0 2024-11-14T19:56:21,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741865_1041 (size=98419) 2024-11-14T19:56:21,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741865_1041 (size=98419) 2024-11-14T19:56:21,718 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/ad9b629635cc4638bae9e7e482c0ac2a as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/ad9b629635cc4638bae9e7e482c0ac2a 2024-11-14T19:56:21,726 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 31c109b10941d0ea69361a85692887e8/info of 31c109b10941d0ea69361a85692887e8 into ad9b629635cc4638bae9e7e482c0ac2a(size=96.1 K), total size for store is 96.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T19:56:21,726 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:21,726 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., storeName=31c109b10941d0ea69361a85692887e8/info, priority=13, startTime=1731614181689; duration=0sec 2024-11-14T19:56:21,726 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:21,726 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 31c109b10941d0ea69361a85692887e8:info 2024-11-14T19:56:22,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:22,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:22,943 INFO [master/867b237d0fa7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T19:56:22,943 INFO [master/867b237d0fa7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-14T19:56:23,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:23,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:56:23,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:23,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-14T19:56:23,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/514288bd0fde49aea8e7a9467790a844 is 1080, key is row0148/info:/1731614181642/Put/seqid=0 2024-11-14T19:56:23,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741866_1042 (size=20078) 2024-11-14T19:56:23,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741866_1042 (size=20078) 2024-11-14T19:56:23,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/514288bd0fde49aea8e7a9467790a844 2024-11-14T19:56:23,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/514288bd0fde49aea8e7a9467790a844 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/514288bd0fde49aea8e7a9467790a844 2024-11-14T19:56:23,720 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/514288bd0fde49aea8e7a9467790a844, entries=14, sequenceid=217, filesize=19.6 K 2024-11-14T19:56:23,721 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 31c109b10941d0ea69361a85692887e8 in 28ms, sequenceid=217, compaction requested=false 2024-11-14T19:56:23,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:23,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-14T19:56:23,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/c22802a4886247deb9248abad3cb4e46 is 1080, key is row0162/info:/1731614183694/Put/seqid=0 2024-11-14T19:56:23,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is 
added to blk_1073741867_1043 (size=17906) 2024-11-14T19:56:23,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741867_1043 (size=17906) 2024-11-14T19:56:23,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/c22802a4886247deb9248abad3cb4e46 2024-11-14T19:56:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/c22802a4886247deb9248abad3cb4e46 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/c22802a4886247deb9248abad3cb4e46 2024-11-14T19:56:23,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/c22802a4886247deb9248abad3cb4e46, entries=12, sequenceid=232, filesize=17.5 K 2024-11-14T19:56:23,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T19:56:23,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43800 deadline: 1731614193757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870 2024-11-14T19:56:23,758 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89 , the old value is region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T19:56:23,758 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T19:56:23,758 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89 because the exception is null or not the one we care about 2024-11-14T19:56:23,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for 31c109b10941d0ea69361a85692887e8 in 37ms, sequenceid=232, compaction requested=true 2024-11-14T19:56:23,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:23,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 31c109b10941d0ea69361a85692887e8:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T19:56:23,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:23,759 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T19:56:23,760 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T19:56:23,760 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1541): 31c109b10941d0ea69361a85692887e8/info is initiating minor compaction (all files) 2024-11-14T19:56:23,760 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 31c109b10941d0ea69361a85692887e8/info in TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 
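The RegionTooBusyException above is the region refusing new writes because its in-memory write buffer has gone past a blocking limit, giving the flusher and compactor time to catch up; the 32.0 K limit most likely reflects this test's deliberately tiny flush size. The sketch below is a simplified model of that check, not the actual HRegion.checkResources code, and the flush size and multiplier are invented for the example.

// Simplified model of a memstore blocking limit (illustrative only).
public class MemstoreLimitSketch {

    static class RegionTooBusy extends RuntimeException {
        RegionTooBusy(String msg) { super(msg); }
    }

    private final long flushSize;
    private final long blockingMultiplier;
    private long memstoreSize;

    MemstoreLimitSketch(long flushSize, long blockingMultiplier) {
        this.flushSize = flushSize;
        this.blockingMultiplier = blockingMultiplier;
    }

    void put(long bytes) {
        long limit = flushSize * blockingMultiplier;
        if (memstoreSize + bytes > limit) {
            throw new RegionTooBusy("Over memstore limit=" + limit + " bytes");
        }
        memstoreSize += bytes; // a real region would also append the edit to the WAL here
    }

    void flush() {
        memstoreSize = 0; // pretend the memstore was written out as a new HFile
    }

    public static void main(String[] args) {
        // An 8 KB flush size with a 4x blocking multiplier gives a 32 KB limit,
        // matching the "Over memstore limit=32.0 K" message in the log above.
        MemstoreLimitSketch region = new MemstoreLimitSketch(8 * 1024, 4);
        try {
            for (int i = 0; i < 40; i++) {
                region.put(1024); // 1 KB writes until the limit trips
            }
        } catch (RegionTooBusy e) {
            System.out.println("write rejected: " + e.getMessage());
            region.flush(); // once a flush completes, writes can proceed again
        }
    }
}

On the client side this surfaces as a retryable exception, which is why the AsyncRegionLocatorHelper DEBUG lines above decide the cached region location does not need updating and the write is simply retried later.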
2024-11-14T19:56:23,760 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/ad9b629635cc4638bae9e7e482c0ac2a, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/514288bd0fde49aea8e7a9467790a844, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/c22802a4886247deb9248abad3cb4e46] into tmpdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp, totalSize=133.2 K 2024-11-14T19:56:23,760 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting ad9b629635cc4638bae9e7e482c0ac2a, keycount=86, bloomtype=ROW, size=96.1 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1731614155231 2024-11-14T19:56:23,761 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 514288bd0fde49aea8e7a9467790a844, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731614181642 2024-11-14T19:56:23,761 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting c22802a4886247deb9248abad3cb4e46, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1731614183694 2024-11-14T19:56:23,773 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 31c109b10941d0ea69361a85692887e8#info#compaction#81 average throughput is 38.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:56:23,774 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/beeede6a924549c690141344566c4e88 is 1080, key is row0062/info:/1731614155231/Put/seqid=0 2024-11-14T19:56:23,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741868_1044 (size=126685) 2024-11-14T19:56:23,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741868_1044 (size=126685) 2024-11-14T19:56:23,784 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/beeede6a924549c690141344566c4e88 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/beeede6a924549c690141344566c4e88 2024-11-14T19:56:23,790 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 31c109b10941d0ea69361a85692887e8/info of 31c109b10941d0ea69361a85692887e8 into beeede6a924549c690141344566c4e88(size=123.7 K), total size for store is 123.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T19:56:23,790 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:23,790 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., storeName=31c109b10941d0ea69361a85692887e8/info, priority=13, startTime=1731614183759; duration=0sec 2024-11-14T19:56:23,791 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:23,791 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 31c109b10941d0ea69361a85692887e8:info 2024-11-14T19:56:24,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:24,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:25,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:25,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:26,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:26,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:27,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:27,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:27,834 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-14T19:56:28,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:28,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:29,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:29,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:30,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:30,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:31,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:31,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:32,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:32,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:33,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:33,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:33,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:33,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-14T19:56:33,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/cf5a9cb657ca4307bf4ceca13cebcd67 is 1080, key is row0174/info:/1731614183723/Put/seqid=0 2024-11-14T19:56:33,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741869_1045 (size=24394) 2024-11-14T19:56:33,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741869_1045 (size=24394) 2024-11-14T19:56:33,779 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/cf5a9cb657ca4307bf4ceca13cebcd67 2024-11-14T19:56:33,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/cf5a9cb657ca4307bf4ceca13cebcd67 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/cf5a9cb657ca4307bf4ceca13cebcd67 2024-11-14T19:56:33,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/cf5a9cb657ca4307bf4ceca13cebcd67, entries=18, sequenceid=254, filesize=23.8 K 2024-11-14T19:56:33,791 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=1.05 KB/1076 for 31c109b10941d0ea69361a85692887e8 in 23ms, sequenceid=254, compaction requested=false 2024-11-14T19:56:33,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:34,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:34,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:35,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:35,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:56:35,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:35,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T19:56:35,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/11a663d2870947c1bc8e667ecea23959 is 1080, key is row0192/info:/1731614193769/Put/seqid=0 2024-11-14T19:56:35,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741870_1046 (size=12523) 2024-11-14T19:56:35,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741870_1046 (size=12523) 2024-11-14T19:56:35,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/11a663d2870947c1bc8e667ecea23959 2024-11-14T19:56:35,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/11a663d2870947c1bc8e667ecea23959 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/11a663d2870947c1bc8e667ecea23959 2024-11-14T19:56:35,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/11a663d2870947c1bc8e667ecea23959, entries=7, sequenceid=264, filesize=12.2 K 2024-11-14T19:56:35,834 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 31c109b10941d0ea69361a85692887e8 in 48ms, sequenceid=264, compaction requested=true 2024-11-14T19:56:35,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:35,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 31c109b10941d0ea69361a85692887e8:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T19:56:35,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:35,835 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T19:56:35,836 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 163602 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-14T19:56:35,836 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1541): 31c109b10941d0ea69361a85692887e8/info is initiating minor compaction (all files) 2024-11-14T19:56:35,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:35,837 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 31c109b10941d0ea69361a85692887e8/info in TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 2024-11-14T19:56:35,837 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/beeede6a924549c690141344566c4e88, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/cf5a9cb657ca4307bf4ceca13cebcd67, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/11a663d2870947c1bc8e667ecea23959] into tmpdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp, totalSize=159.8 K 2024-11-14T19:56:35,837 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-14T19:56:35,837 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting beeede6a924549c690141344566c4e88, keycount=112, bloomtype=ROW, size=123.7 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1731614155231 2024-11-14T19:56:35,838 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting cf5a9cb657ca4307bf4ceca13cebcd67, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1731614183723 2024-11-14T19:56:35,839 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 11a663d2870947c1bc8e667ecea23959, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1731614193769 2024-11-14T19:56:35,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/bf6fe4afbc074432971a6b99402558b2 is 1080, key is row0199/info:/1731614195788/Put/seqid=0 2024-11-14T19:56:35,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741871_1047 (size=19013) 2024-11-14T19:56:35,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741871_1047 (size=19013) 2024-11-14T19:56:35,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=280 (bloomFilter=true), 
to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/bf6fe4afbc074432971a6b99402558b2 2024-11-14T19:56:35,892 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 31c109b10941d0ea69361a85692887e8#info#compaction#85 average throughput is 28.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:56:35,893 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/d6ae55d11d8a4b4a87299a62897fad20 is 1080, key is row0062/info:/1731614155231/Put/seqid=0 2024-11-14T19:56:35,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/bf6fe4afbc074432971a6b99402558b2 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/bf6fe4afbc074432971a6b99402558b2 2024-11-14T19:56:35,900 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/bf6fe4afbc074432971a6b99402558b2, entries=13, sequenceid=280, filesize=18.6 K 2024-11-14T19:56:35,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=13.66 KB/13988 for 31c109b10941d0ea69361a85692887e8 in 64ms, sequenceid=280, compaction requested=false 2024-11-14T19:56:35,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:35,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741872_1048 (size=153817) 2024-11-14T19:56:35,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741872_1048 (size=153817) 2024-11-14T19:56:36,322 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/d6ae55d11d8a4b4a87299a62897fad20 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/d6ae55d11d8a4b4a87299a62897fad20 2024-11-14T19:56:36,330 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 31c109b10941d0ea69361a85692887e8/info of 31c109b10941d0ea69361a85692887e8 into d6ae55d11d8a4b4a87299a62897fad20(size=150.2 K), total size for store is 168.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T19:56:36,331 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:36,331 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., storeName=31c109b10941d0ea69361a85692887e8/info, priority=13, startTime=1731614195834; duration=0sec 2024-11-14T19:56:36,331 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:36,331 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 31c109b10941d0ea69361a85692887e8:info 2024-11-14T19:56:36,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:56:36,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:37,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:37,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:56:37,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:37,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-14T19:56:37,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/5b7307f32cf848f591136d2bcba78c0a is 1080, key is row0212/info:/1731614195839/Put/seqid=0 2024-11-14T19:56:37,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741873_1049 (size=20092) 2024-11-14T19:56:37,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741873_1049 (size=20092) 2024-11-14T19:56:37,913 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/5b7307f32cf848f591136d2bcba78c0a 2024-11-14T19:56:37,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T19:56:37,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43800 deadline: 1731614207914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870 2024-11-14T19:56:37,916 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89 , the old value is region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T19:56:37,916 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T19:56:37,916 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89 because the exception is null or not the one we care about 2024-11-14T19:56:37,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/5b7307f32cf848f591136d2bcba78c0a as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/5b7307f32cf848f591136d2bcba78c0a 2024-11-14T19:56:37,926 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/5b7307f32cf848f591136d2bcba78c0a, entries=14, sequenceid=298, filesize=19.6 K 2024-11-14T19:56:37,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for 31c109b10941d0ea69361a85692887e8 in 53ms, sequenceid=298, compaction requested=true 2024-11-14T19:56:37,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:37,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 31c109b10941d0ea69361a85692887e8:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T19:56:37,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:37,928 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T19:56:37,929 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192922 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T19:56:37,930 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1541): 31c109b10941d0ea69361a85692887e8/info is initiating minor compaction (all files) 2024-11-14T19:56:37,930 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 31c109b10941d0ea69361a85692887e8/info in TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 
2024-11-14T19:56:37,930 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/d6ae55d11d8a4b4a87299a62897fad20, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/bf6fe4afbc074432971a6b99402558b2, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/5b7307f32cf848f591136d2bcba78c0a] into tmpdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp, totalSize=188.4 K 2024-11-14T19:56:37,931 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting d6ae55d11d8a4b4a87299a62897fad20, keycount=137, bloomtype=ROW, size=150.2 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1731614155231 2024-11-14T19:56:37,931 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf6fe4afbc074432971a6b99402558b2, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1731614195788 2024-11-14T19:56:37,931 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5b7307f32cf848f591136d2bcba78c0a, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1731614195839 2024-11-14T19:56:37,946 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 31c109b10941d0ea69361a85692887e8#info#compaction#87 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:56:37,947 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/7d2aa7b05ff6446ab21caff6dbf820b2 is 1080, key is row0062/info:/1731614155231/Put/seqid=0 2024-11-14T19:56:37,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741874_1050 (size=183060) 2024-11-14T19:56:37,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741874_1050 (size=183060) 2024-11-14T19:56:37,961 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/7d2aa7b05ff6446ab21caff6dbf820b2 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/7d2aa7b05ff6446ab21caff6dbf820b2 2024-11-14T19:56:37,968 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 31c109b10941d0ea69361a85692887e8/info of 31c109b10941d0ea69361a85692887e8 into 7d2aa7b05ff6446ab21caff6dbf820b2(size=178.8 K), total size for store is 178.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T19:56:37,968 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:37,968 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., storeName=31c109b10941d0ea69361a85692887e8/info, priority=13, startTime=1731614197928; duration=0sec 2024-11-14T19:56:37,968 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:37,968 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 31c109b10941d0ea69361a85692887e8:info 2024-11-14T19:56:38,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:38,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:39,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:39,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:40,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:40,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:41,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:41,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:41,708 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T19:56:42,136 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=70, reuseRatio=88.61% 2024-11-14T19:56:42,151 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-14T19:56:42,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:42,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:43,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:43,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:44,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:44,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:45,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:45,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:46,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:46,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:47,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:47,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-14T19:56:47,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8
2024-11-14T19:56:47,948 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-11-14T19:56:47,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/870be9beeb9e4e67b5dffcf1e23b98a4 is 1080, key is row0226/info:/1731614197876/Put/seqid=0
2024-11-14T19:56:47,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741875_1051 (size=22254)
2024-11-14T19:56:47,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741875_1051 (size=22254)
2024-11-14T19:56:47,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-14T19:56:47,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:43800 deadline: 1731614217975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870
2024-11-14T19:56:47,976 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89 , the old value is region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-14T19:56:47,976 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=31c109b10941d0ea69361a85692887e8, server=867b237d0fa7,36939,1731614141870
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-14T19:56:47,976 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., hostname=867b237d0fa7,36939,1731614141870, seqNum=89 because the exception is null or not the one we care about
2024-11-14T19:56:48,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/870be9beeb9e4e67b5dffcf1e23b98a4
2024-11-14T19:56:48,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/870be9beeb9e4e67b5dffcf1e23b98a4 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/870be9beeb9e4e67b5dffcf1e23b98a4
2024-11-14T19:56:48,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/870be9beeb9e4e67b5dffcf1e23b98a4, entries=16, sequenceid=318, filesize=21.7 K
2024-11-14T19:56:48,382 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for 31c109b10941d0ea69361a85692887e8 in 433ms, sequenceid=318, compaction requested=false
2024-11-14T19:56:48,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8:
2024-11-14T19:56:48,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:48,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:49,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:49,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:50,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:50,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:50,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,916 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,916 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:50,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,427 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T19:56:51,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,468 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,468 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,469 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,473 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T19:56:51,481 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b358000d261efa4ae5748383aa975bac, had cached 0 bytes from a total of 70862 2024-11-14T19:56:51,482 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 31c109b10941d0ea69361a85692887e8, had cached 0 bytes from a total of 205314 2024-11-14T19:56:51,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:51,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:52,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:52,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:53,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:53,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:54,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:54,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:55,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:55,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:56,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:56,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:57,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:57,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:58,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36939 {}] regionserver.HRegion(8855): Flush requested on 31c109b10941d0ea69361a85692887e8 2024-11-14T19:56:58,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-14T19:56:58,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/1ec460dfc833452fa45f9d1a609cccb0 is 1080, key is row0242/info:/1731614207949/Put/seqid=0 2024-11-14T19:56:58,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741876_1052 (size=20092) 2024-11-14T19:56:58,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741876_1052 (size=20092) 2024-11-14T19:56:58,069 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/1ec460dfc833452fa45f9d1a609cccb0 2024-11-14T19:56:58,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/1ec460dfc833452fa45f9d1a609cccb0 as 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/1ec460dfc833452fa45f9d1a609cccb0 2024-11-14T19:56:58,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/1ec460dfc833452fa45f9d1a609cccb0, entries=14, sequenceid=335, filesize=19.6 K 2024-11-14T19:56:58,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for 31c109b10941d0ea69361a85692887e8 in 25ms, sequenceid=335, compaction requested=true 2024-11-14T19:56:58,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:58,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 31c109b10941d0ea69361a85692887e8:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T19:56:58,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:58,083 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T19:56:58,085 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 225406 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T19:56:58,085 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1541): 31c109b10941d0ea69361a85692887e8/info is initiating minor compaction (all files) 2024-11-14T19:56:58,085 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 31c109b10941d0ea69361a85692887e8/info in TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 
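The flush recorded just above follows the usual two-step store-file pattern: the memstore is written to a new HFile under the region's .tmp directory, and that file only becomes visible to readers once it is committed (renamed) into the column-family directory, which is what the HRegionFileSystem "Committing ... as ..." line shows. The fragment below is a minimal sketch of that commit step using the plain Hadoop FileSystem API; the class and method names are illustrative only and are not the actual HRegionFileSystem code.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Simplified "flush to .tmp, then commit" step: the new HFile is written
    // under <region>/.tmp and made visible by renaming it into the family dir.
    public final class TmpThenCommitSketch {
      private TmpThenCommitSketch() {}

      static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir)
          throws IOException {
        Path dst = new Path(familyDir, tmpFile.getName());
        // On HDFS the rename is a cheap metadata operation, so readers either
        // see the complete store file or do not see it at all.
        if (!fs.rename(tmpFile, dst)) {
          throw new IOException("Failed to commit " + tmpFile + " as " + dst);
        }
        return dst;
      }
    }

Once committed, the new file is added to the store (the "Added ... entries=14, sequenceid=335" line) and, with three eligible files now present, a minor compaction is requested, which the entries that follow carry out.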
2024-11-14T19:56:58,085 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/7d2aa7b05ff6446ab21caff6dbf820b2, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/870be9beeb9e4e67b5dffcf1e23b98a4, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/1ec460dfc833452fa45f9d1a609cccb0] into tmpdir=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp, totalSize=220.1 K 2024-11-14T19:56:58,085 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7d2aa7b05ff6446ab21caff6dbf820b2, keycount=164, bloomtype=ROW, size=178.8 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1731614155231 2024-11-14T19:56:58,086 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 870be9beeb9e4e67b5dffcf1e23b98a4, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1731614197876 2024-11-14T19:56:58,086 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1ec460dfc833452fa45f9d1a609cccb0, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1731614207949 2024-11-14T19:56:58,103 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 31c109b10941d0ea69361a85692887e8#info#compaction#90 average throughput is 66.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T19:56:58,103 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/e3e15e60f3c24ddfa36b8b999375474f is 1080, key is row0062/info:/1731614155231/Put/seqid=0 2024-11-14T19:56:58,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741877_1053 (size=215609) 2024-11-14T19:56:58,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741877_1053 (size=215609) 2024-11-14T19:56:58,116 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/e3e15e60f3c24ddfa36b8b999375474f as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/e3e15e60f3c24ddfa36b8b999375474f 2024-11-14T19:56:58,122 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 31c109b10941d0ea69361a85692887e8/info of 31c109b10941d0ea69361a85692887e8 into e3e15e60f3c24ddfa36b8b999375474f(size=210.6 K), total size for store is 210.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T19:56:58,123 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:56:58,123 INFO [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., storeName=31c109b10941d0ea69361a85692887e8/info, priority=13, startTime=1731614218083; duration=0sec 2024-11-14T19:56:58,123 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T19:56:58,123 DEBUG [RS:0;867b237d0fa7:36939-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 31c109b10941d0ea69361a85692887e8:info 2024-11-14T19:56:58,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:58,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:59,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:56:59,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:57:00,061 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-14T19:57:00,061 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C36939%2C1731614141870.1731614220061 2024-11-14T19:57:00,080 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,080 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,081 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,081 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,081 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,081 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870/867b237d0fa7%2C36939%2C1731614141870.1731614142391 with entries=316, filesize=309.65 KB; new WAL /user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870/867b237d0fa7%2C36939%2C1731614141870.1731614220061 2024-11-14T19:57:00,082 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35647:35647),(127.0.0.1/127.0.0.1:36971:36971)] 2024-11-14T19:57:00,082 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870/867b237d0fa7%2C36939%2C1731614141870.1731614142391 is not closed yet, will try archiving it next time 2024-11-14T19:57:00,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741833_1009 (size=317093) 2024-11-14T19:57:00,083 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741833_1009 (size=317093) 2024-11-14T19:57:00,086 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 31c109b10941d0ea69361a85692887e8 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T19:57:00,126 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/42189fcded984819b1a076a53f6bf9b8 is 1080, key is row0256/info:/1731614218060/Put/seqid=0 2024-11-14T19:57:00,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741879_1055 (size=6035) 2024-11-14T19:57:00,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741879_1055 (size=6035) 2024-11-14T19:57:00,134 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/42189fcded984819b1a076a53f6bf9b8 2024-11-14T19:57:00,140 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/.tmp/info/42189fcded984819b1a076a53f6bf9b8 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/42189fcded984819b1a076a53f6bf9b8 2024-11-14T19:57:00,145 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/42189fcded984819b1a076a53f6bf9b8, entries=1, sequenceid=340, filesize=5.9 K 2024-11-14T19:57:00,147 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 31c109b10941d0ea69361a85692887e8 in 60ms, sequenceid=340, compaction requested=false 2024-11-14T19:57:00,147 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 31c109b10941d0ea69361a85692887e8: 2024-11-14T19:57:00,147 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-14T19:57:00,151 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/.tmp/info/4a5f05db5cb04529a720883b570be198 is 193, key is TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8./info:regioninfo/1731614166132/Put/seqid=0 2024-11-14T19:57:00,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741880_1056 (size=6223) 2024-11-14T19:57:00,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741880_1056 (size=6223) 2024-11-14T19:57:00,156 
INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/.tmp/info/4a5f05db5cb04529a720883b570be198 2024-11-14T19:57:00,163 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/.tmp/info/4a5f05db5cb04529a720883b570be198 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/info/4a5f05db5cb04529a720883b570be198 2024-11-14T19:57:00,169 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/info/4a5f05db5cb04529a720883b570be198, entries=5, sequenceid=21, filesize=6.1 K 2024-11-14T19:57:00,170 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 23ms, sequenceid=21, compaction requested=false 2024-11-14T19:57:00,171 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T19:57:00,171 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for b358000d261efa4ae5748383aa975bac: 2024-11-14T19:57:00,171 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C36939%2C1731614141870.1731614220171 2024-11-14T19:57:00,184 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,184 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,184 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,184 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,184 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,184 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870/867b237d0fa7%2C36939%2C1731614141870.1731614220061 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870/867b237d0fa7%2C36939%2C1731614141870.1731614220171 2024-11-14T19:57:00,185 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35647:35647),(127.0.0.1/127.0.0.1:36971:36971)] 2024-11-14T19:57:00,185 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870/867b237d0fa7%2C36939%2C1731614141870.1731614220061 is not closed yet, will try archiving it next time 2024-11-14T19:57:00,185 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870/867b237d0fa7%2C36939%2C1731614141870.1731614142391 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/oldWALs/867b237d0fa7%2C36939%2C1731614141870.1731614142391 2024-11-14T19:57:00,186 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T19:57:00,186 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741878_1054 (size=731) 2024-11-14T19:57:00,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741878_1054 (size=731) 2024-11-14T19:57:00,588 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/WALs/867b237d0fa7,36939,1731614141870/867b237d0fa7%2C36939%2C1731614141870.1731614220061 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/oldWALs/867b237d0fa7%2C36939%2C1731614141870.1731614220061 2024-11-14T19:57:00,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:57:00,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:57:00,687 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T19:57:00,687 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
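The long run of "Failed invocation ... java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings above comes from the Close-WAL-Writer-0 thread of an earlier mini cluster (the one on port 35675): while recovering the lease on an old WAL, RecoverLeaseFSUtils periodically asks HDFS whether the file is already closed, and it appears to do so through reflection so it can cope with filesystems that do not expose isFileClosed. Because that cluster's DFSClient has already been shut down, every probe throws, and the check is retried about once per second, producing one identical stack trace per attempt. The fragment below is a hedged sketch of that probe-and-retry shape, not the actual RecoverLeaseFSUtils implementation; the class and helper names are invented for illustration.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative probe: DistributedFileSystem has isFileClosed(Path) but the
    // generic FileSystem API does not, so the call is made reflectively.
    public final class IsFileClosedProbeSketch {
      private IsFileClosedProbeSketch() {}

      static Boolean probeIsFileClosed(FileSystem fs, Path wal) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, wal);
        } catch (NoSuchMethodException e) {
          return null; // filesystem does not support the probe
        } catch (IllegalAccessException | InvocationTargetException e) {
          // With a shut-down DFSClient the cause is "Filesystem closed",
          // which is exactly what the WARN entries above keep logging.
          return null;
        }
      }

      static boolean waitUntilClosed(FileSystem fs, Path wal, long timeoutMs)
          throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          if (Boolean.TRUE.equals(probeIsFileClosed(fs, wal))) {
            return true;
          }
          Thread.sleep(1000L); // roughly the one-second cadence seen above
        }
        return false;
      }
    }

Since the underlying client is gone, the probe can never succeed for those old WALs; the warnings therefore keep repeating while the current test cluster (on port 39689) proceeds normally.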
2024-11-14T19:57:00,687 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:57:00,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:57:00,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:57:00,687 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
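The call stack logged alongside the connection close shows where the shutdown originates: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then stops the master and region server. Below is a minimal JUnit 4 sketch of that teardown shape; it is illustrative only and not the actual AbstractTestLogRolling code.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class LogRollingTeardownSketch {
      // Assumed to have been started with startMiniCluster() during test setup.
      protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the cached connection, asks the master to shut down, and
        // stops the region servers, producing the entries that follow.
        TEST_UTIL.shutdownMiniCluster();
      }
    }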
2024-11-14T19:57:00,687 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T19:57:00,688 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=486344968, stopped=false 2024-11-14T19:57:00,688 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=867b237d0fa7,44359,1731614141733 2024-11-14T19:57:00,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T19:57:00,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T19:57:00,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:00,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:00,730 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T19:57:00,731 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T19:57:00,731 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:57:00,731 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:57:00,731 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '867b237d0fa7,36939,1731614141870' ***** 2024-11-14T19:57:00,731 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T19:57:00,732 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T19:57:00,732 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T19:57:00,732 INFO [RS:0;867b237d0fa7:36939 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T19:57:00,732 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:57:00,732 INFO [RS:0;867b237d0fa7:36939 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-14T19:57:00,732 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(3091): Received CLOSE for 31c109b10941d0ea69361a85692887e8 2024-11-14T19:57:00,732 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(3091): Received CLOSE for b358000d261efa4ae5748383aa975bac 2024-11-14T19:57:00,732 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(959): stopping server 867b237d0fa7,36939,1731614141870 2024-11-14T19:57:00,732 INFO [RS:0;867b237d0fa7:36939 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T19:57:00,733 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 31c109b10941d0ea69361a85692887e8, disabling compactions & flushes 2024-11-14T19:57:00,733 INFO [RS:0;867b237d0fa7:36939 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;867b237d0fa7:36939. 2024-11-14T19:57:00,733 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 2024-11-14T19:57:00,733 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 2024-11-14T19:57:00,733 DEBUG [RS:0;867b237d0fa7:36939 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:57:00,733 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. after waiting 0 ms 2024-11-14T19:57:00,733 DEBUG [RS:0;867b237d0fa7:36939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:57:00,733 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 
2024-11-14T19:57:00,733 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T19:57:00,733 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T19:57:00,733 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T19:57:00,733 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T19:57:00,733 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-14T19:57:00,733 DEBUG [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(1325): Online Regions={31c109b10941d0ea69361a85692887e8=TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8., 1588230740=hbase:meta,,1.1588230740, b358000d261efa4ae5748383aa975bac=TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac.} 2024-11-14T19:57:00,734 DEBUG [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 31c109b10941d0ea69361a85692887e8, b358000d261efa4ae5748383aa975bac 2024-11-14T19:57:00,734 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:57:00,734 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:57:00,734 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:57:00,734 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T19:57:00,734 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T19:57:00,734 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e->hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/ad4bd4d6ea6c429a9d3c4963a6932f2a-top, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/c0a71bae47ae497280fdfabbbf96ae1c, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/TestLogRolling-testLogRolling=229a9f60e73dc7c25a0778695d1fe42e-5d8b7911a272442384de9763a52e4b66, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/20874f2d65ec439a95639bbbb9b84786, 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/487ca577e99d4ac4b4d85146c30ebd00, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/b6da87ad2af74c30b81fd4a41cc15a87, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/bc9b2f414d9d47d8b06ce555c9e9a39f, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/7b4865016211489d85a863ee96fb6eae, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/5e5e0ed0cdd54973b5e91c21904d5c64, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/21cc67c8bd434f3aa49cc1052b81ba5f, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/26d8199017a1413c8c9a86d0da4983f5, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/da56aefd6b084ef187890663ec5539e5, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/32624ac50d69448ba98acd8b61448793, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/ad9b629635cc4638bae9e7e482c0ac2a, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/e5f18615a7cd4c10a19f43acb18ff4e9, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/514288bd0fde49aea8e7a9467790a844, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/beeede6a924549c690141344566c4e88, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/c22802a4886247deb9248abad3cb4e46, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/cf5a9cb657ca4307bf4ceca13cebcd67, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/d6ae55d11d8a4b4a87299a62897fad20, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/11a663d2870947c1bc8e667ecea23959, 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/bf6fe4afbc074432971a6b99402558b2, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/7d2aa7b05ff6446ab21caff6dbf820b2, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/5b7307f32cf848f591136d2bcba78c0a, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/870be9beeb9e4e67b5dffcf1e23b98a4, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/1ec460dfc833452fa45f9d1a609cccb0] to archive 2024-11-14T19:57:00,735 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T19:57:00,738 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:57:00,738 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:57:00,740 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/c0a71bae47ae497280fdfabbbf96ae1c to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/c0a71bae47ae497280fdfabbbf96ae1c 2024-11-14T19:57:00,740 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-14T19:57:00,741 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:57:00,741 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:57:00,741 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from 
FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/TestLogRolling-testLogRolling=229a9f60e73dc7c25a0778695d1fe42e-5d8b7911a272442384de9763a52e4b66 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/TestLogRolling-testLogRolling=229a9f60e73dc7c25a0778695d1fe42e-5d8b7911a272442384de9763a52e4b66 2024-11-14T19:57:00,741 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731614220734Running coprocessor pre-close hooks at 1731614220734Disabling compacts and flushes for region at 1731614220734Disabling writes for close at 1731614220734Writing region close event to WAL at 1731614220736 (+2 ms)Running coprocessor post-close hooks at 1731614220741 (+5 ms)Closed at 1731614220741 2024-11-14T19:57:00,741 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T19:57:00,742 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/20874f2d65ec439a95639bbbb9b84786 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/20874f2d65ec439a95639bbbb9b84786 2024-11-14T19:57:00,743 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/487ca577e99d4ac4b4d85146c30ebd00 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/487ca577e99d4ac4b4d85146c30ebd00 2024-11-14T19:57:00,744 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/b6da87ad2af74c30b81fd4a41cc15a87 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/b6da87ad2af74c30b81fd4a41cc15a87 2024-11-14T19:57:00,745 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/bc9b2f414d9d47d8b06ce555c9e9a39f to 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/bc9b2f414d9d47d8b06ce555c9e9a39f 2024-11-14T19:57:00,746 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/7b4865016211489d85a863ee96fb6eae to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/7b4865016211489d85a863ee96fb6eae 2024-11-14T19:57:00,747 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/5e5e0ed0cdd54973b5e91c21904d5c64 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/5e5e0ed0cdd54973b5e91c21904d5c64 2024-11-14T19:57:00,748 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/21cc67c8bd434f3aa49cc1052b81ba5f to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/21cc67c8bd434f3aa49cc1052b81ba5f 2024-11-14T19:57:00,750 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/26d8199017a1413c8c9a86d0da4983f5 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/26d8199017a1413c8c9a86d0da4983f5 2024-11-14T19:57:00,751 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/da56aefd6b084ef187890663ec5539e5 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/da56aefd6b084ef187890663ec5539e5 2024-11-14T19:57:00,752 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/32624ac50d69448ba98acd8b61448793 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/32624ac50d69448ba98acd8b61448793 2024-11-14T19:57:00,753 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/ad9b629635cc4638bae9e7e482c0ac2a to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/ad9b629635cc4638bae9e7e482c0ac2a 2024-11-14T19:57:00,754 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/e5f18615a7cd4c10a19f43acb18ff4e9 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/e5f18615a7cd4c10a19f43acb18ff4e9 2024-11-14T19:57:00,755 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/514288bd0fde49aea8e7a9467790a844 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/514288bd0fde49aea8e7a9467790a844 2024-11-14T19:57:00,756 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/beeede6a924549c690141344566c4e88 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/beeede6a924549c690141344566c4e88 2024-11-14T19:57:00,757 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/c22802a4886247deb9248abad3cb4e46 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/c22802a4886247deb9248abad3cb4e46 2024-11-14T19:57:00,758 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/cf5a9cb657ca4307bf4ceca13cebcd67 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/cf5a9cb657ca4307bf4ceca13cebcd67 2024-11-14T19:57:00,758 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/d6ae55d11d8a4b4a87299a62897fad20 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/d6ae55d11d8a4b4a87299a62897fad20 2024-11-14T19:57:00,759 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/11a663d2870947c1bc8e667ecea23959 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/11a663d2870947c1bc8e667ecea23959 2024-11-14T19:57:00,760 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/bf6fe4afbc074432971a6b99402558b2 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/bf6fe4afbc074432971a6b99402558b2 2024-11-14T19:57:00,761 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/7d2aa7b05ff6446ab21caff6dbf820b2 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/7d2aa7b05ff6446ab21caff6dbf820b2 2024-11-14T19:57:00,762 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/5b7307f32cf848f591136d2bcba78c0a to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/5b7307f32cf848f591136d2bcba78c0a 2024-11-14T19:57:00,763 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/870be9beeb9e4e67b5dffcf1e23b98a4 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/870be9beeb9e4e67b5dffcf1e23b98a4 2024-11-14T19:57:00,764 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/1ec460dfc833452fa45f9d1a609cccb0 to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/info/1ec460dfc833452fa45f9d1a609cccb0 2024-11-14T19:57:00,765 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=867b237d0fa7:44359 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-14T19:57:00,766 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c0a71bae47ae497280fdfabbbf96ae1c=8359, 20874f2d65ec439a95639bbbb9b84786=12509, 487ca577e99d4ac4b4d85146c30ebd00=30029, b6da87ad2af74c30b81fd4a41cc15a87=18987, bc9b2f414d9d47d8b06ce555c9e9a39f=17895, 7b4865016211489d85a863ee96fb6eae=50638, 5e5e0ed0cdd54973b5e91c21904d5c64=12516, 21cc67c8bd434f3aa49cc1052b81ba5f=17906, 26d8199017a1413c8c9a86d0da4983f5=75675, da56aefd6b084ef187890663ec5539e5=16828, 32624ac50d69448ba98acd8b61448793=12516, ad9b629635cc4638bae9e7e482c0ac2a=98419, e5f18615a7cd4c10a19f43acb18ff4e9=20078, 514288bd0fde49aea8e7a9467790a844=20078, beeede6a924549c690141344566c4e88=126685, c22802a4886247deb9248abad3cb4e46=17906, cf5a9cb657ca4307bf4ceca13cebcd67=24394, d6ae55d11d8a4b4a87299a62897fad20=153817, 11a663d2870947c1bc8e667ecea23959=12523, bf6fe4afbc074432971a6b99402558b2=19013, 7d2aa7b05ff6446ab21caff6dbf820b2=183060, 5b7307f32cf848f591136d2bcba78c0a=20092, 870be9beeb9e4e67b5dffcf1e23b98a4=22254, 1ec460dfc833452fa45f9d1a609cccb0=20092] 2024-11-14T19:57:00,769 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/31c109b10941d0ea69361a85692887e8/recovered.edits/343.seqid, newMaxSeqId=343, maxSeqId=88 2024-11-14T19:57:00,770 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 2024-11-14T19:57:00,770 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 31c109b10941d0ea69361a85692887e8: Waiting for close lock at 1731614220732Running coprocessor pre-close hooks at 1731614220732Disabling compacts and flushes for region at 1731614220732Disabling writes for close at 1731614220733 (+1 ms)Writing region close event to WAL at 1731614220766 (+33 ms)Running coprocessor post-close hooks at 1731614220770 (+4 ms)Closed at 1731614220770 2024-11-14T19:57:00,770 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731614165415.31c109b10941d0ea69361a85692887e8. 2024-11-14T19:57:00,771 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b358000d261efa4ae5748383aa975bac, disabling compactions & flushes 2024-11-14T19:57:00,771 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac. 2024-11-14T19:57:00,771 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac. 2024-11-14T19:57:00,771 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac. 
after waiting 0 ms 2024-11-14T19:57:00,771 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac. 2024-11-14T19:57:00,771 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac/info/ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e->hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/229a9f60e73dc7c25a0778695d1fe42e/info/ad4bd4d6ea6c429a9d3c4963a6932f2a-bottom] to archive 2024-11-14T19:57:00,772 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T19:57:00,773 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac/info/ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e to hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/archive/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac/info/ad4bd4d6ea6c429a9d3c4963a6932f2a.229a9f60e73dc7c25a0778695d1fe42e 2024-11-14T19:57:00,774 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-14T19:57:00,777 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/data/default/TestLogRolling-testLogRolling/b358000d261efa4ae5748383aa975bac/recovered.edits/93.seqid, newMaxSeqId=93, maxSeqId=88 2024-11-14T19:57:00,778 INFO [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac. 2024-11-14T19:57:00,778 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b358000d261efa4ae5748383aa975bac: Waiting for close lock at 1731614220770Running coprocessor pre-close hooks at 1731614220770Disabling compacts and flushes for region at 1731614220771 (+1 ms)Disabling writes for close at 1731614220771Writing region close event to WAL at 1731614220774 (+3 ms)Running coprocessor post-close hooks at 1731614220778 (+4 ms)Closed at 1731614220778 2024-11-14T19:57:00,778 DEBUG [RS_CLOSE_REGION-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731614165415.b358000d261efa4ae5748383aa975bac. 2024-11-14T19:57:00,934 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(976): stopping server 867b237d0fa7,36939,1731614141870; all regions closed. 
2024-11-14T19:57:00,935 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,935 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,935 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,935 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,935 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741834_1010 (size=8107) 2024-11-14T19:57:00,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741834_1010 (size=8107) 2024-11-14T19:57:00,942 DEBUG [RS:0;867b237d0fa7:36939 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/oldWALs 2024-11-14T19:57:00,942 INFO [RS:0;867b237d0fa7:36939 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C36939%2C1731614141870.meta:.meta(num 1731614142822) 2024-11-14T19:57:00,943 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,943 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,943 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,943 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,943 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:00,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741881_1057 (size=778) 2024-11-14T19:57:00,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741881_1057 (size=778) 2024-11-14T19:57:00,949 DEBUG [RS:0;867b237d0fa7:36939 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/oldWALs 2024-11-14T19:57:00,949 INFO [RS:0;867b237d0fa7:36939 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C36939%2C1731614141870:(num 1731614220171) 2024-11-14T19:57:00,949 DEBUG [RS:0;867b237d0fa7:36939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:57:00,949 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:57:00,949 INFO [RS:0;867b237d0fa7:36939 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:57:00,950 INFO [RS:0;867b237d0fa7:36939 {}] hbase.ChoreService(370): Chore service for: regionserver/867b237d0fa7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T19:57:00,950 INFO [RS:0;867b237d0fa7:36939 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:57:00,950 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T19:57:00,950 INFO [RS:0;867b237d0fa7:36939 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36939 2024-11-14T19:57:00,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/867b237d0fa7,36939,1731614141870 2024-11-14T19:57:00,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:57:00,958 INFO [RS:0;867b237d0fa7:36939 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:57:00,966 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [867b237d0fa7,36939,1731614141870] 2024-11-14T19:57:00,974 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/867b237d0fa7,36939,1731614141870 already deleted, retry=false 2024-11-14T19:57:00,975 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 867b237d0fa7,36939,1731614141870 expired; onlineServers=0 2024-11-14T19:57:00,975 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '867b237d0fa7,44359,1731614141733' ***** 2024-11-14T19:57:00,975 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T19:57:00,975 INFO [M:0;867b237d0fa7:44359 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T19:57:00,975 INFO [M:0;867b237d0fa7:44359 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:57:00,975 DEBUG [M:0;867b237d0fa7:44359 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T19:57:00,975 DEBUG [M:0;867b237d0fa7:44359 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T19:57:00,975 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T19:57:00,975 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614142190 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614142190,5,FailOnTimeoutGroup] 2024-11-14T19:57:00,975 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614142190 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614142190,5,FailOnTimeoutGroup] 2024-11-14T19:57:00,975 INFO [M:0;867b237d0fa7:44359 {}] hbase.ChoreService(370): Chore service for: master/867b237d0fa7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T19:57:00,975 INFO [M:0;867b237d0fa7:44359 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:57:00,976 DEBUG [M:0;867b237d0fa7:44359 {}] master.HMaster(1795): Stopping service threads 2024-11-14T19:57:00,976 INFO [M:0;867b237d0fa7:44359 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T19:57:00,976 INFO [M:0;867b237d0fa7:44359 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T19:57:00,976 INFO [M:0;867b237d0fa7:44359 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T19:57:00,976 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T19:57:00,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T19:57:00,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:00,983 DEBUG [M:0;867b237d0fa7:44359 {}] zookeeper.ZKUtil(347): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T19:57:00,983 WARN [M:0;867b237d0fa7:44359 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T19:57:00,984 INFO [M:0;867b237d0fa7:44359 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/.lastflushedseqids 2024-11-14T19:57:00,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741882_1058 (size=228) 2024-11-14T19:57:00,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741882_1058 (size=228) 2024-11-14T19:57:00,992 INFO [M:0;867b237d0fa7:44359 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T19:57:00,992 INFO [M:0;867b237d0fa7:44359 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T19:57:00,992 DEBUG [M:0;867b237d0fa7:44359 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:57:00,992 INFO [M:0;867b237d0fa7:44359 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:57:00,992 DEBUG [M:0;867b237d0fa7:44359 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:57:00,992 DEBUG [M:0;867b237d0fa7:44359 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:57:00,992 DEBUG [M:0;867b237d0fa7:44359 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:57:00,992 INFO [M:0;867b237d0fa7:44359 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.41 KB heapSize=63.33 KB 2024-11-14T19:57:01,011 DEBUG [M:0;867b237d0fa7:44359 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03d2dc54314843818f2f9152657761cb is 82, key is hbase:meta,,1/info:regioninfo/1731614142855/Put/seqid=0 2024-11-14T19:57:01,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741883_1059 (size=5672) 2024-11-14T19:57:01,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741883_1059 (size=5672) 2024-11-14T19:57:01,016 INFO [M:0;867b237d0fa7:44359 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03d2dc54314843818f2f9152657761cb 2024-11-14T19:57:01,039 DEBUG [M:0;867b237d0fa7:44359 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3c7cfb1248a44ed891d4d7522ff14e8b is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731614143402/Put/seqid=0 2024-11-14T19:57:01,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741884_1060 (size=7089) 2024-11-14T19:57:01,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741884_1060 (size=7089) 2024-11-14T19:57:01,045 INFO [M:0;867b237d0fa7:44359 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.80 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3c7cfb1248a44ed891d4d7522ff14e8b 2024-11-14T19:57:01,050 INFO [M:0;867b237d0fa7:44359 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3c7cfb1248a44ed891d4d7522ff14e8b 2024-11-14T19:57:01,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:57:01,067 INFO [RS:0;867b237d0fa7:36939 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:57:01,067 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:36939-0x1013c18b2b80001, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:57:01,067 INFO [RS:0;867b237d0fa7:36939 {}] regionserver.HRegionServer(1031): Exiting; stopping=867b237d0fa7,36939,1731614141870; zookeeper connection closed. 2024-11-14T19:57:01,067 DEBUG [M:0;867b237d0fa7:44359 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3b207bbc367844f0b3996b10951afbc7 is 69, key is 867b237d0fa7,36939,1731614141870/rs:state/1731614142234/Put/seqid=0 2024-11-14T19:57:01,067 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1fdcc4fe {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1fdcc4fe 2024-11-14T19:57:01,067 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T19:57:01,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741885_1061 (size=5156) 2024-11-14T19:57:01,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741885_1061 (size=5156) 2024-11-14T19:57:01,072 INFO [M:0;867b237d0fa7:44359 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3b207bbc367844f0b3996b10951afbc7 2024-11-14T19:57:01,091 DEBUG [M:0;867b237d0fa7:44359 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f9d68e6da7e349e386823b706f280fc8 is 52, key is load_balancer_on/state:d/1731614143012/Put/seqid=0 2024-11-14T19:57:01,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741886_1062 (size=5056) 2024-11-14T19:57:01,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741886_1062 (size=5056) 2024-11-14T19:57:01,096 INFO [M:0;867b237d0fa7:44359 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f9d68e6da7e349e386823b706f280fc8 2024-11-14T19:57:01,102 DEBUG [M:0;867b237d0fa7:44359 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03d2dc54314843818f2f9152657761cb as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/03d2dc54314843818f2f9152657761cb 2024-11-14T19:57:01,107 INFO [M:0;867b237d0fa7:44359 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/03d2dc54314843818f2f9152657761cb, entries=8, sequenceid=125, filesize=5.5 K 2024-11-14T19:57:01,108 DEBUG [M:0;867b237d0fa7:44359 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3c7cfb1248a44ed891d4d7522ff14e8b as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3c7cfb1248a44ed891d4d7522ff14e8b 2024-11-14T19:57:01,113 INFO [M:0;867b237d0fa7:44359 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3c7cfb1248a44ed891d4d7522ff14e8b 2024-11-14T19:57:01,113 INFO [M:0;867b237d0fa7:44359 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3c7cfb1248a44ed891d4d7522ff14e8b, entries=13, sequenceid=125, filesize=6.9 K 2024-11-14T19:57:01,114 DEBUG [M:0;867b237d0fa7:44359 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3b207bbc367844f0b3996b10951afbc7 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3b207bbc367844f0b3996b10951afbc7 2024-11-14T19:57:01,119 INFO [M:0;867b237d0fa7:44359 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3b207bbc367844f0b3996b10951afbc7, entries=1, sequenceid=125, filesize=5.0 K 2024-11-14T19:57:01,120 DEBUG [M:0;867b237d0fa7:44359 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f9d68e6da7e349e386823b706f280fc8 as hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f9d68e6da7e349e386823b706f280fc8 2024-11-14T19:57:01,125 INFO [M:0;867b237d0fa7:44359 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39689/user/jenkins/test-data/6238ca86-8812-804a-3c33-cc75a5743d66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f9d68e6da7e349e386823b706f280fc8, entries=1, sequenceid=125, filesize=4.9 K 2024-11-14T19:57:01,127 INFO [M:0;867b237d0fa7:44359 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 134ms, sequenceid=125, compaction requested=false 2024-11-14T19:57:01,128 INFO [M:0;867b237d0fa7:44359 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
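The HRegionFileSystem "Committing ... .tmp/... as .../info/..." lines above are the standard write-to-temp-then-rename commit for flushed store files: each HFile is written under .tmp, then renamed into the column-family directory so readers never observe a partially written file. A minimal sketch of that pattern with the plain Hadoop FileSystem API; the endpoint and file names below are illustrative, not the ones from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;

public class TmpThenCommit {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Illustrative filesystem; the run above talks to hdfs://localhost:39689.
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/data/store/.tmp/hfile-0001");       // hypothetical path
    Path committed = new Path("/data/store/info/hfile-0001"); // hypothetical path

    // 1. Write the flush output under .tmp, invisible to readers of the store dir.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would be written here");
    }

    // 2. Commit by renaming into the store directory. On HDFS the rename is
    //    atomic, so the file is either fully visible or not present at all.
    if (!fs.rename(tmp, committed)) {
      throw new IOException("Failed to commit " + tmp + " as " + committed);
    }
  }
}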
2024-11-14T19:57:01,128 DEBUG [M:0;867b237d0fa7:44359 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731614220992Disabling compacts and flushes for region at 1731614220992Disabling writes for close at 1731614220992Obtaining lock to block concurrent updates at 1731614220992Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731614220992Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52639, getHeapSize=64784, getOffHeapSize=0, getCellsCount=148 at 1731614220993 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731614220993Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731614220993Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731614221011 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731614221011Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731614221020 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731614221039 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731614221039Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731614221050 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731614221066 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731614221066Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731614221076 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731614221090 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731614221090Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b1d4bc4: reopening flushed file at 1731614221101 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53010c0a: reopening flushed file at 1731614221107 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ed1b82b: reopening flushed file at 1731614221113 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b678034: reopening flushed file at 1731614221120 (+7 ms)Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 134ms, sequenceid=125, compaction requested=false at 1731614221127 (+7 ms)Writing region close event to WAL at 1731614221128 (+1 ms)Closed at 1731614221128 2024-11-14T19:57:01,128 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:01,129 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:01,129 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:01,129 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:01,129 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:01,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44395 is added to blk_1073741830_1006 (size=61308) 2024-11-14T19:57:01,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37851 is added to blk_1073741830_1006 (size=61308) 2024-11-14T19:57:01,132 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
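The interrupted FSHLog sync runners and the exiting LogRoller above are the WAL-rolling machinery that TestLogRolling exercises (see the ResourceChecker entries further down). For orientation, a roll can also be requested from a client; this is a hedged standalone sketch using the public Admin API, not code taken from this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollAllWals {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask every live region server to close its current WAL and start a new one.
      for (ServerName rs : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}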
2024-11-14T19:57:01,132 INFO [M:0;867b237d0fa7:44359 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T19:57:01,132 INFO [M:0;867b237d0fa7:44359 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44359 2024-11-14T19:57:01,132 INFO [M:0;867b237d0fa7:44359 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:57:01,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:57:01,241 INFO [M:0;867b237d0fa7:44359 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:57:01,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44359-0x1013c18b2b80000, quorum=127.0.0.1:58796, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:57:01,244 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@247322e7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:57:01,244 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@173a06b0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:57:01,244 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:57:01,244 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b969725{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:57:01,244 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36a8171e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/hadoop.log.dir/,STOPPED} 2024-11-14T19:57:01,245 WARN [BP-456699626-172.17.0.2-1731614140350 heartbeating to localhost/127.0.0.1:39689 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:57:01,245 WARN [BP-456699626-172.17.0.2-1731614140350 heartbeating to localhost/127.0.0.1:39689 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-456699626-172.17.0.2-1731614140350 (Datanode Uuid eff9e298-4f38-4ea0-a706-4b632e33d878) service to localhost/127.0.0.1:39689 2024-11-14T19:57:01,246 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/cluster_9a93c74c-8c1a-c094-9f67-36106bcfcf14/data/data3/current/BP-456699626-172.17.0.2-1731614140350 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:57:01,246 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/cluster_9a93c74c-8c1a-c094-9f67-36106bcfcf14/data/data4/current/BP-456699626-172.17.0.2-1731614140350 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:57:01,246 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered 
interrupt and exit. 2024-11-14T19:57:01,246 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:57:01,246 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:57:01,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74aeebd9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:57:01,249 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17d44920{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:57:01,249 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:57:01,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@197b82ca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:57:01,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59cf886d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/hadoop.log.dir/,STOPPED} 2024-11-14T19:57:01,250 WARN [BP-456699626-172.17.0.2-1731614140350 heartbeating to localhost/127.0.0.1:39689 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:57:01,250 WARN [BP-456699626-172.17.0.2-1731614140350 heartbeating to localhost/127.0.0.1:39689 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-456699626-172.17.0.2-1731614140350 (Datanode Uuid 9da3c350-5010-4b5d-b5f8-03bde92f3da2) service to localhost/127.0.0.1:39689 2024-11-14T19:57:01,250 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
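The cascade above -- master RPC and ZooKeeper closing, then the datanode Jetty contexts and block-pool services stopping -- is what a single teardown call produces. A rough sketch of that teardown, assuming the HBaseTestingUtil class named in this log (its method names follow the long-standing HBaseTestingUtility API, so treat the exact signatures as an assumption):

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterLifecycle {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();            // HBase + mini DFS + mini ZooKeeper
    try {
      // ... a test body would use util.getConnection() here ...
    } finally {
      // Stops region servers and the master, then the datanodes and namenode,
      // then the mini ZooKeeper cluster -- ending with "Minicluster is down",
      // just as the log below shows.
      util.shutdownMiniCluster();
    }
  }
}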
2024-11-14T19:57:01,250 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T19:57:01,251 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/cluster_9a93c74c-8c1a-c094-9f67-36106bcfcf14/data/data1/current/BP-456699626-172.17.0.2-1731614140350 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:57:01,251 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/cluster_9a93c74c-8c1a-c094-9f67-36106bcfcf14/data/data2/current/BP-456699626-172.17.0.2-1731614140350 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T19:57:01,251 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T19:57:01,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3978da62{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:57:01,257 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a583b2e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:57:01,257 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:57:01,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63529b4a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:57:01,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9c2a749{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/hadoop.log.dir/,STOPPED} 2024-11-14T19:57:01,264 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T19:57:01,295 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T19:57:01,304 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=226 (was 205) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39689 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:39689 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:39689 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39689 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:39689 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39689 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:39689 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:39689 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=506 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=189 (was 177) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5206 (was 5702) 2024-11-14T19:57:01,313 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=226, OpenFileDescriptor=506, MaxFileDescriptor=1048576, SystemLoadAverage=189, ProcessCount=11, AvailableMemoryMB=5205 2024-11-14T19:57:01,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T19:57:01,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/hadoop.log.dir so I do NOT create it in target/test-data/27e5752b-5184-cacf-ede6-54714929153b 2024-11-14T19:57:01,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ac354666-f2de-5fc6-3b68-f773b5e414c8/hadoop.tmp.dir so I do NOT create it in target/test-data/27e5752b-5184-cacf-ede6-54714929153b 2024-11-14T19:57:01,313 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/cluster_54a0df41-c447-f1ad-8be8-4f89a04e743f, deleteOnExit=true 2024-11-14T19:57:01,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T19:57:01,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/test.cache.data in system properties and HBase conf 2024-11-14T19:57:01,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T19:57:01,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/hadoop.log.dir in system properties and HBase conf 2024-11-14T19:57:01,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T19:57:01,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T19:57:01,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T19:57:01,314 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T19:57:01,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T19:57:01,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T19:57:01,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T19:57:01,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T19:57:01,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T19:57:01,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T19:57:01,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T19:57:01,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:57:01,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T19:57:01,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/nfs.dump.dir in system properties and HBase conf 2024-11-14T19:57:01,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/java.io.tmpdir in system properties and HBase conf 2024-11-14T19:57:01,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T19:57:01,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T19:57:01,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T19:57:01,329 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:57:01,553 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:57:01,557 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:57:01,558 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:57:01,558 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:57:01,558 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:57:01,559 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:57:01,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42db1fad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:57:01,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56ff1634{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:57:01,678 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1223f548{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/java.io.tmpdir/jetty-localhost-34013-hadoop-hdfs-3_4_1-tests_jar-_-any-16572576257942475792/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T19:57:01,679 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40cff3e3{HTTP/1.1, (http/1.1)}{localhost:34013} 2024-11-14T19:57:01,679 INFO [Time-limited test {}] server.Server(415): Started @333725ms 2024-11-14T19:57:01,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:57:01,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:57:01,692 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T19:57:01,860 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:57:01,863 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:57:01,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:57:01,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:57:01,864 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T19:57:01,864 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d36c2e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:57:01,864 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6479a298{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:57:01,965 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77086409{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/java.io.tmpdir/jetty-localhost-42605-hadoop-hdfs-3_4_1-tests_jar-_-any-7000784940065099803/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:57:01,965 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7596481a{HTTP/1.1, (http/1.1)}{localhost:42605} 2024-11-14T19:57:01,965 INFO [Time-limited test {}] server.Server(415): Started @334011ms 2024-11-14T19:57:01,966 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:57:01,993 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T19:57:01,996 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T19:57:01,997 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T19:57:01,997 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T19:57:01,997 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T19:57:01,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7914027{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/hadoop.log.dir/,AVAILABLE} 2024-11-14T19:57:01,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@769a95c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T19:57:02,101 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f3d7578{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/java.io.tmpdir/jetty-localhost-37139-hadoop-hdfs-3_4_1-tests_jar-_-any-14419567949756837033/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:57:02,101 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5307a3e{HTTP/1.1, (http/1.1)}{localhost:37139} 2024-11-14T19:57:02,101 INFO [Time-limited test {}] server.Server(415): Started @334147ms 2024-11-14T19:57:02,103 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T19:57:02,260 INFO [regionserver/867b237d0fa7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:57:02,512 WARN [Thread-2509 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/cluster_54a0df41-c447-f1ad-8be8-4f89a04e743f/data/data1/current/BP-882977484-172.17.0.2-1731614221333/current, will proceed with Du for space computation calculation, 2024-11-14T19:57:02,512 WARN [Thread-2510 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/cluster_54a0df41-c447-f1ad-8be8-4f89a04e743f/data/data2/current/BP-882977484-172.17.0.2-1731614221333/current, will proceed with Du for space computation calculation, 2024-11-14T19:57:02,536 WARN [Thread-2474 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1
2024-11-14T19:57:02,538 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdd662e9d7bb74773 with lease ID 0xcf74028dba9592b6: Processing first storage report for DS-b6f3a15e-54cd-4d95-a3cf-56684e241138 from datanode DatanodeRegistration(127.0.0.1:39765, datanodeUuid=3b956ee6-a0cc-4267-9ec8-49e085f5d269, infoPort=38615, infoSecurePort=0, ipcPort=41875, storageInfo=lv=-57;cid=testClusterID;nsid=1069303165;c=1731614221333)
2024-11-14T19:57:02,538 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd662e9d7bb74773 with lease ID 0xcf74028dba9592b6: from storage DS-b6f3a15e-54cd-4d95-a3cf-56684e241138 node DatanodeRegistration(127.0.0.1:39765, datanodeUuid=3b956ee6-a0cc-4267-9ec8-49e085f5d269, infoPort=38615, infoSecurePort=0, ipcPort=41875, storageInfo=lv=-57;cid=testClusterID;nsid=1069303165;c=1731614221333), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-14T19:57:02,538 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdd662e9d7bb74773 with lease ID 0xcf74028dba9592b6: Processing first storage report for DS-cb3e10df-00ab-4698-8881-f85643c2c771 from datanode DatanodeRegistration(127.0.0.1:39765, datanodeUuid=3b956ee6-a0cc-4267-9ec8-49e085f5d269, infoPort=38615, infoSecurePort=0, ipcPort=41875, storageInfo=lv=-57;cid=testClusterID;nsid=1069303165;c=1731614221333)
2024-11-14T19:57:02,538 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd662e9d7bb74773 with lease ID 0xcf74028dba9592b6: from storage DS-cb3e10df-00ab-4698-8881-f85643c2c771 node DatanodeRegistration(127.0.0.1:39765, datanodeUuid=3b956ee6-a0cc-4267-9ec8-49e085f5d269, infoPort=38615, infoSecurePort=0, ipcPort=41875, storageInfo=lv=-57;cid=testClusterID;nsid=1069303165;c=1731614221333), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-14T19:57:02,671 WARN [Thread-2521 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/cluster_54a0df41-c447-f1ad-8be8-4f89a04e743f/data/data3/current/BP-882977484-172.17.0.2-1731614221333/current, will proceed with Du for space computation calculation,
2024-11-14T19:57:02,671 WARN [Thread-2522 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/cluster_54a0df41-c447-f1ad-8be8-4f89a04e743f/data/data4/current/BP-882977484-172.17.0.2-1731614221333/current, will proceed with Du for space computation calculation,
2024-11-14T19:57:02,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-14T19:57:02,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-14T19:57:02,690 WARN [Thread-2497 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-14T19:57:02,692 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b21b7655f989625 with lease ID 0xcf74028dba9592b7: Processing first storage report for DS-49f4ddc0-bbdc-4cc7-961d-f964becc0a13 from datanode DatanodeRegistration(127.0.0.1:42321, datanodeUuid=7319e287-792a-469d-a597-ab5a1af11bbd, infoPort=43831, infoSecurePort=0, ipcPort=43853, storageInfo=lv=-57;cid=testClusterID;nsid=1069303165;c=1731614221333)
2024-11-14T19:57:02,692 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b21b7655f989625 with lease ID 0xcf74028dba9592b7: from storage DS-49f4ddc0-bbdc-4cc7-961d-f964becc0a13 node DatanodeRegistration(127.0.0.1:42321, datanodeUuid=7319e287-792a-469d-a597-ab5a1af11bbd, infoPort=43831, infoSecurePort=0, ipcPort=43853, storageInfo=lv=-57;cid=testClusterID;nsid=1069303165;c=1731614221333), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-14T19:57:02,692 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b21b7655f989625 with lease ID 0xcf74028dba9592b7: Processing first storage report for DS-5d60ebe1-5108-4d1b-900d-9e42392395a8 from datanode DatanodeRegistration(127.0.0.1:42321, datanodeUuid=7319e287-792a-469d-a597-ab5a1af11bbd, infoPort=43831, infoSecurePort=0, ipcPort=43853, storageInfo=lv=-57;cid=testClusterID;nsid=1069303165;c=1731614221333)
2024-11-14T19:57:02,692 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b21b7655f989625 with lease ID 0xcf74028dba9592b7: from storage DS-5d60ebe1-5108-4d1b-900d-9e42392395a8 node DatanodeRegistration(127.0.0.1:42321, datanodeUuid=7319e287-792a-469d-a597-ab5a1af11bbd, infoPort=43831, infoSecurePort=0, ipcPort=43853, storageInfo=lv=-57;cid=testClusterID;nsid=1069303165;c=1731614221333), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-14T19:57:02,752 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b
2024-11-14T19:57:02,755 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/cluster_54a0df41-c447-f1ad-8be8-4f89a04e743f/zookeeper_0, clientPort=62365, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/cluster_54a0df41-c447-f1ad-8be8-4f89a04e743f/zookeeper_0/version-2, dataDirSize=457
dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/cluster_54a0df41-c447-f1ad-8be8-4f89a04e743f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T19:57:02,756 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62365 2024-11-14T19:57:02,757 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:57:02,759 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:57:02,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:57:02,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741825_1001 (size=7) 2024-11-14T19:57:02,781 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651 with version=8 2024-11-14T19:57:02,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38605/user/jenkins/test-data/39debd77-3f17-381d-a88f-4de57244cf1f/hbase-staging 2024-11-14T19:57:02,783 INFO [Time-limited test {}] client.ConnectionUtils(128): master/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:57:02,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:57:02,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:57:02,783 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:57:02,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:57:02,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T19:57:02,783 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T19:57:02,783 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:57:02,784 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43539 2024-11-14T19:57:02,785 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=master:43539 connecting to ZooKeeper ensemble=127.0.0.1:62365 2024-11-14T19:57:02,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:435390x0, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:57:02,861 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43539-0x1013c19ef4b0000 connected 2024-11-14T19:57:02,925 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:57:02,927 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:57:02,929 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:57:02,929 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651, hbase.cluster.distributed=false 2024-11-14T19:57:02,931 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:57:02,931 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43539 2024-11-14T19:57:02,932 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43539 2024-11-14T19:57:02,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43539 2024-11-14T19:57:02,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43539 2024-11-14T19:57:02,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43539 2024-11-14T19:57:02,953 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/867b237d0fa7:0 server-side Connection retries=45 2024-11-14T19:57:02,953 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:57:02,953 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T19:57:02,953 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T19:57:02,953 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T19:57:02,953 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, 
maxQueueLength=30, handlerCount=1 2024-11-14T19:57:02,953 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T19:57:02,953 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T19:57:02,954 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38657 2024-11-14T19:57:02,955 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38657 connecting to ZooKeeper ensemble=127.0.0.1:62365 2024-11-14T19:57:02,956 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:57:02,957 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:57:02,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:386570x0, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T19:57:02,966 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:386570x0, quorum=127.0.0.1:62365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:57:02,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38657-0x1013c19ef4b0001 connected 2024-11-14T19:57:02,967 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T19:57:02,967 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T19:57:02,968 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T19:57:02,969 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T19:57:02,969 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38657 2024-11-14T19:57:02,970 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38657 2024-11-14T19:57:02,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38657 2024-11-14T19:57:02,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38657 2024-11-14T19:57:02,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38657 2024-11-14T19:57:02,985 DEBUG [M:0;867b237d0fa7:43539 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;867b237d0fa7:43539 2024-11-14T19:57:02,985 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup 
master ZNode /hbase/backup-masters/867b237d0fa7,43539,1731614222783 2024-11-14T19:57:02,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:57:02,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:57:02,991 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/867b237d0fa7,43539,1731614222783 2024-11-14T19:57:02,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T19:57:02,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:02,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:03,000 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T19:57:03,001 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/867b237d0fa7,43539,1731614222783 from backup master directory 2024-11-14T19:57:03,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:57:03,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/867b237d0fa7,43539,1731614222783 2024-11-14T19:57:03,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T19:57:03,008 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
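Note on the znode sequence recorded above (register under /hbase/backup-masters, watch /hbase/master before it exists, delete the backup entry on promotion): this is the usual ZooKeeper ephemeral-node election pattern. The following is a minimal sketch using the plain ZooKeeper client rather than HBase's ZKWatcher/ZKUtil classes; the child name "example-master", the 30000 ms session timeout, and the open ACL are illustrative assumptions, and the parent znodes are assumed to already exist as they do in the cluster above.

  import org.apache.zookeeper.CreateMode;
  import org.apache.zookeeper.ZooDefs;
  import org.apache.zookeeper.ZooKeeper;
  import org.apache.zookeeper.data.Stat;

  public class BackupMasterSketch {
    public static void main(String[] args) throws Exception {
      // Session against the mini quorum from this log (clientPort=62365).
      ZooKeeper zk = new ZooKeeper("127.0.0.1:62365", 30000, event -> { });
      // Register under /hbase/backup-masters as an ephemeral node so the entry
      // disappears with the session; "example-master" is a made-up child name.
      zk.create("/hbase/backup-masters/example-master", new byte[0],
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // exists() with watch=true is the "Set watcher on znode that does not yet exist"
      // idiom seen in the log: the watch fires when /hbase/master is created or deleted.
      Stat active = zk.exists("/hbase/master", true);
      System.out.println(active == null ? "no active master yet" : "active master present");
      zk.close();
    }
  }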
2024-11-14T19:57:03,008 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=867b237d0fa7,43539,1731614222783 2024-11-14T19:57:03,011 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/hbase.id] with ID: ecbd85ba-dc7c-404d-bc13-2ca94b814dd0 2024-11-14T19:57:03,011 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/.tmp/hbase.id 2024-11-14T19:57:03,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:57:03,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741826_1002 (size=42) 2024-11-14T19:57:03,016 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/.tmp/hbase.id]:[hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/hbase.id] 2024-11-14T19:57:03,027 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:57:03,027 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T19:57:03,029 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
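Note on the two FSUtils steps above (write hbase.id to a .tmp location, then move it to its final name): this is the standard write-then-rename publication trick on HDFS, so readers never observe a half-written cluster ID file. A rough sketch of the same pattern with the public Hadoop FileSystem API follows; the /user/jenkins/demo paths are hypothetical, only the NameNode address and the cluster ID string are taken from the log, and this is not the FSUtils implementation itself.

  import java.io.IOException;
  import java.net.URI;
  import java.nio.charset.StandardCharsets;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class WriteThenRenameSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // NameNode address from the log; the demo paths below are made up.
      FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43879"), conf);
      Path tmp = new Path("/user/jenkins/demo/.tmp/hbase.id");
      Path dst = new Path("/user/jenkins/demo/hbase.id");
      try (FSDataOutputStream out = fs.create(tmp, true)) {
        out.write("ecbd85ba-dc7c-404d-bc13-2ca94b814dd0".getBytes(StandardCharsets.UTF_8));
      }
      // Content becomes visible at the final path in one step when the rename succeeds.
      if (!fs.rename(tmp, dst)) {
        throw new IOException("rename failed: " + tmp + " -> " + dst);
      }
      fs.close();
    }
  }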
2024-11-14T19:57:03,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:03,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:03,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:57:03,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741827_1003 (size=196) 2024-11-14T19:57:03,049 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T19:57:03,050 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T19:57:03,051 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:57:03,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:57:03,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741828_1004 (size=1189) 2024-11-14T19:57:03,062 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store 2024-11-14T19:57:03,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:57:03,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741829_1005 (size=34) 2024-11-14T19:57:03,073 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:57:03,073 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:57:03,073 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:57:03,073 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:57:03,073 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:57:03,073 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:57:03,073 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
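Note on the master:store descriptor dumped above: a descriptor with the same column-family settings can be approximated with the public HBase client API. The sketch below assumes a hypothetical table name "example"; only the 'info' family is spelled out with the values shown in the log (3 versions, ROWCOL bloom, in-memory, 8 KB blocks, ROW_INDEX_V1 encoding), the other families are left at their defaults, and this is not how MasterRegion builds its descriptor internally.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class DescriptorSketch {
    public static void main(String[] args) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("example"))   // table name is illustrative only
          // 'info' mirrors the settings printed in the log above.
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)
              .setBloomFilterType(BloomType.ROWCOL)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
              .build())
          // 'proc', 'rs' and 'state' are added with default settings here.
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
          .build();
      System.out.println(td);
    }
  }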
2024-11-14T19:57:03,073 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731614223073Disabling compacts and flushes for region at 1731614223073Disabling writes for close at 1731614223073Writing region close event to WAL at 1731614223073Closed at 1731614223073 2024-11-14T19:57:03,075 WARN [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/.initializing 2024-11-14T19:57:03,075 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/WALs/867b237d0fa7,43539,1731614222783 2024-11-14T19:57:03,078 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C43539%2C1731614222783, suffix=, logDir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/WALs/867b237d0fa7,43539,1731614222783, archiveDir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/oldWALs, maxLogs=10 2024-11-14T19:57:03,078 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C43539%2C1731614222783.1731614223078 2024-11-14T19:57:03,089 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/WALs/867b237d0fa7,43539,1731614222783/867b237d0fa7%2C43539%2C1731614222783.1731614223078 2024-11-14T19:57:03,098 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43831:43831),(127.0.0.1/127.0.0.1:38615:38615)] 2024-11-14T19:57:03,101 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:57:03,102 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:57:03,102 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:57:03,102 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:57:03,106 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:57:03,107 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T19:57:03,107 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:03,108 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:57:03,108 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:57:03,109 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T19:57:03,109 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:03,110 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:57:03,110 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:57:03,111 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T19:57:03,112 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:03,112 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:57:03,112 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:57:03,113 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T19:57:03,113 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:03,114 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T19:57:03,114 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:57:03,115 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:57:03,115 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:57:03,116 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:57:03,116 DEBUG [master/867b237d0fa7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:57:03,117 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T19:57:03,118 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T19:57:03,123 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:57:03,124 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737913, jitterRate=-0.06169632077217102}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T19:57:03,124 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731614223102Initializing all the Stores at 1731614223103 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614223103Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614223105 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614223105Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614223105Cleaning up temporary data from old regions at 1731614223116 (+11 ms)Region opened successfully at 1731614223124 (+8 ms) 2024-11-14T19:57:03,126 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T19:57:03,129 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@478b9c19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:57:03,131 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T19:57:03,131 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T19:57:03,131 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T19:57:03,131 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T19:57:03,132 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T19:57:03,132 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T19:57:03,132 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T19:57:03,138 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T19:57:03,139 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T19:57:03,149 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T19:57:03,150 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T19:57:03,151 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T19:57:03,157 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T19:57:03,158 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T19:57:03,160 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T19:57:03,166 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T19:57:03,167 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T19:57:03,174 DEBUG 
[master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T19:57:03,177 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T19:57:03,182 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T19:57:03,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:57:03,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T19:57:03,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:03,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:03,192 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=867b237d0fa7,43539,1731614222783, sessionid=0x1013c19ef4b0000, setting cluster-up flag (Was=false) 2024-11-14T19:57:03,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:03,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:03,234 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T19:57:03,235 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,43539,1731614222783 2024-11-14T19:57:03,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:03,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:03,274 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T19:57:03,275 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=867b237d0fa7,43539,1731614222783 2024-11-14T19:57:03,277 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T19:57:03,278 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T19:57:03,279 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T19:57:03,279 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T19:57:03,279 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 867b237d0fa7,43539,1731614222783 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T19:57:03,280 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:57:03,280 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:57:03,280 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:57:03,280 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/867b237d0fa7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T19:57:03,281 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/867b237d0fa7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T19:57:03,281 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:57:03,281 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:57:03,281 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/867b237d0fa7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T19:57:03,282 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:57:03,282 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T19:57:03,284 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:03,284 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T19:57:03,287 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731614253286 2024-11-14T19:57:03,287 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T19:57:03,287 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T19:57:03,287 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T19:57:03,287 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T19:57:03,287 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T19:57:03,287 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T19:57:03,291 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:03,299 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T19:57:03,299 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T19:57:03,299 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T19:57:03,307 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T19:57:03,307 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T19:57:03,311 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614223307,5,FailOnTimeoutGroup] 2024-11-14T19:57:03,311 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614223311,5,FailOnTimeoutGroup] 2024-11-14T19:57:03,311 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:03,311 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T19:57:03,312 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:03,312 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
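Note: the LogsCleaner/HFileCleaner entries above are instances of HBase's ChoreService/ScheduledChore scheduling. As a hedged, minimal sketch of that mechanism (not code from this test; the chore name and period below are illustrative, the period simply mirrors the 600000 ms seen in the log):

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Simple Stoppable so the chore can be cancelled; HBase services pass themselves here.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };

        // A chore that repeats every 600000 ms, like the LogsCleaner period above.
        ScheduledChore chore = new ScheduledChore("ExampleCleaner", stopper, 600_000) {
          @Override protected void chore() {
            // Periodic work goes here, e.g. scanning a directory for expired files.
          }
        };

        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore); // ChoreService then reports the chore as enabled, as in the log
        Thread.sleep(1_000);
        service.shutdown();
      }
    }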
2024-11-14T19:57:03,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:57:03,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741831_1007 (size=1321) 2024-11-14T19:57:03,314 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T19:57:03,315 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651 2024-11-14T19:57:03,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:57:03,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741832_1008 (size=32) 2024-11-14T19:57:03,373 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(746): ClusterId : ecbd85ba-dc7c-404d-bc13-2ca94b814dd0 2024-11-14T19:57:03,373 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T19:57:03,383 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T19:57:03,383 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T19:57:03,396 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T19:57:03,396 DEBUG [RS:0;867b237d0fa7:38657 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34175d78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=867b237d0fa7/172.17.0.2:0 2024-11-14T19:57:03,410 DEBUG [RS:0;867b237d0fa7:38657 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;867b237d0fa7:38657 2024-11-14T19:57:03,410 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T19:57:03,410 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T19:57:03,410 DEBUG [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T19:57:03,411 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(2659): reportForDuty to master=867b237d0fa7,43539,1731614222783 with port=38657, startcode=1731614222953 2024-11-14T19:57:03,411 DEBUG [RS:0;867b237d0fa7:38657 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T19:57:03,421 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36757, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T19:57:03,422 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43539 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 867b237d0fa7,38657,1731614222953 2024-11-14T19:57:03,422 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43539 {}] master.ServerManager(517): Registering regionserver=867b237d0fa7,38657,1731614222953 2024-11-14T19:57:03,424 DEBUG [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651 2024-11-14T19:57:03,424 DEBUG [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43879 2024-11-14T19:57:03,424 DEBUG [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T19:57:03,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:57:03,433 DEBUG [RS:0;867b237d0fa7:38657 {}] zookeeper.ZKUtil(111): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/867b237d0fa7,38657,1731614222953 2024-11-14T19:57:03,433 WARN [RS:0;867b237d0fa7:38657 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
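Note: the AbstractRpcClient/RpcConnection entries above show the same connection machinery an HBase client goes through. A hedged sketch of opening a client connection with the public API (the quorum and client port below are placeholders; this test uses an ephemeral mini-cluster port, 62365):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ClientConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Placeholder ZooKeeper quorum/port for a standalone cluster.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "2181");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // The underlying RPC client prints codec/compressor settings at DEBUG,
          // similar to the AbstractRpcClient line above.
          System.out.println("Connected; meta table: " + meta.getName());
        }
      }
    }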
2024-11-14T19:57:03,433 INFO [RS:0;867b237d0fa7:38657 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:57:03,433 DEBUG [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/WALs/867b237d0fa7,38657,1731614222953 2024-11-14T19:57:03,434 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [867b237d0fa7,38657,1731614222953] 2024-11-14T19:57:03,437 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T19:57:03,438 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T19:57:03,439 INFO [RS:0;867b237d0fa7:38657 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T19:57:03,439 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:03,439 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T19:57:03,440 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T19:57:03,440 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/867b237d0fa7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/867b237d0fa7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/867b237d0fa7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:57:03,440 DEBUG [RS:0;867b237d0fa7:38657 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/867b237d0fa7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T19:57:03,441 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:03,441 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:03,441 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:03,441 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:03,441 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:03,441 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,38657,1731614222953-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:57:03,461 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T19:57:03,461 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,38657,1731614222953-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:03,462 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:03,462 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.Replication(171): 867b237d0fa7,38657,1731614222953 started 2024-11-14T19:57:03,483 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
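Note: the repeated "Starting executor service ... corePoolSize=N, maxPoolSize=N" entries describe bounded worker pools. As a plain-JDK analogy only (not HBase's internal ExecutorService class), the two parameters correspond to a ThreadPoolExecutor's core and maximum pool sizes:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PoolSizeSketch {
      public static void main(String[] args) {
        // corePoolSize=1, maxPoolSize=1 like RS_OPEN_REGION above: one worker thread,
        // extra tasks wait in the queue instead of spawning more threads.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.submit(() -> System.out.println("open region task"));
        openRegionPool.shutdown();
      }
    }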
2024-11-14T19:57:03,483 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(1482): Serving as 867b237d0fa7,38657,1731614222953, RpcServer on 867b237d0fa7/172.17.0.2:38657, sessionid=0x1013c19ef4b0001 2024-11-14T19:57:03,484 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T19:57:03,484 DEBUG [RS:0;867b237d0fa7:38657 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 867b237d0fa7,38657,1731614222953 2024-11-14T19:57:03,484 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,38657,1731614222953' 2024-11-14T19:57:03,484 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T19:57:03,484 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T19:57:03,485 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T19:57:03,485 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T19:57:03,485 DEBUG [RS:0;867b237d0fa7:38657 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 867b237d0fa7,38657,1731614222953 2024-11-14T19:57:03,485 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '867b237d0fa7,38657,1731614222953' 2024-11-14T19:57:03,485 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T19:57:03,485 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T19:57:03,486 DEBUG [RS:0;867b237d0fa7:38657 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T19:57:03,486 INFO [RS:0;867b237d0fa7:38657 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T19:57:03,486 INFO [RS:0;867b237d0fa7:38657 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
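Note: the ZKProcedureMemberRpcs entries below poll child znodes such as /hbase/flush-table-proc/acquired for new procedures. A hedged raw-ZooKeeper sketch of what listing those children amounts to (connect string and session timeout are placeholders, not the test's values):

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeListSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder connect string; this test's mini cluster runs ZooKeeper on 127.0.0.1:62365.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
        // Roughly what "Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'" does.
        List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", false);
        System.out.println("pending procedures: " + acquired);
        zk.close();
      }
    }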
2024-11-14T19:57:03,588 INFO [RS:0;867b237d0fa7:38657 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C38657%2C1731614222953, suffix=, logDir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/WALs/867b237d0fa7,38657,1731614222953, archiveDir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/oldWALs, maxLogs=32 2024-11-14T19:57:03,589 INFO [RS:0;867b237d0fa7:38657 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C38657%2C1731614222953.1731614223589 2024-11-14T19:57:03,595 INFO [RS:0;867b237d0fa7:38657 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/WALs/867b237d0fa7,38657,1731614222953/867b237d0fa7%2C38657%2C1731614222953.1731614223589 2024-11-14T19:57:03,603 DEBUG [RS:0;867b237d0fa7:38657 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43831:43831),(127.0.0.1/127.0.0.1:38615:38615)] 2024-11-14T19:57:03,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:57:03,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:57:03,724 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:57:03,725 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:57:03,727 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:57:03,727 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:03,728 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:57:03,728 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:57:03,730 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:57:03,730 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:03,731 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:57:03,731 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:57:03,732 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:57:03,733 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:03,733 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:57:03,733 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:57:03,735 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:57:03,735 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:03,736 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:57:03,736 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:57:03,737 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/data/hbase/meta/1588230740 2024-11-14T19:57:03,737 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/data/hbase/meta/1588230740 2024-11-14T19:57:03,739 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-11-14T19:57:03,739 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:57:03,739 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T19:57:03,741 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:57:03,747 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T19:57:03,748 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862178, jitterRate=0.09631671011447906}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:57:03,748 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731614223724Initializing all the Stores at 1731614223725 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614223725Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614223725Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614223725Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614223725Cleaning up temporary data from old regions at 1731614223739 (+14 ms)Region opened successfully at 1731614223748 (+9 ms) 2024-11-14T19:57:03,748 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:57:03,748 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:57:03,748 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:57:03,748 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T19:57:03,748 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-11-14T19:57:03,749 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:57:03,749 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731614223748Disabling compacts and flushes for region at 1731614223748Disabling writes for close at 1731614223748Writing region close event to WAL at 1731614223749 (+1 ms)Closed at 1731614223749 2024-11-14T19:57:03,750 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:57:03,750 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T19:57:03,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T19:57:03,752 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:57:03,752 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T19:57:03,903 DEBUG [867b237d0fa7:43539 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T19:57:03,903 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=867b237d0fa7,38657,1731614222953 2024-11-14T19:57:03,905 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,38657,1731614222953, state=OPENING 2024-11-14T19:57:03,947 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T19:57:03,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:03,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:03,958 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:57:03,958 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:57:03,958 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T19:57:03,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; 
OpenRegionProcedure 1588230740, server=867b237d0fa7,38657,1731614222953}] 2024-11-14T19:57:04,111 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T19:57:04,113 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44375, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T19:57:04,116 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T19:57:04,117 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:57:04,118 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=867b237d0fa7%2C38657%2C1731614222953.meta, suffix=.meta, logDir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/WALs/867b237d0fa7,38657,1731614222953, archiveDir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/oldWALs, maxLogs=32 2024-11-14T19:57:04,119 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 867b237d0fa7%2C38657%2C1731614222953.meta.1731614224118.meta 2024-11-14T19:57:04,136 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/WALs/867b237d0fa7,38657,1731614222953/867b237d0fa7%2C38657%2C1731614222953.meta.1731614224118.meta 2024-11-14T19:57:04,138 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38615:38615),(127.0.0.1/127.0.0.1:43831:43831)] 2024-11-14T19:57:04,143 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T19:57:04,143 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T19:57:04,144 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T19:57:04,144 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
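Note: the RegionCoprocessorHost entries above show the MultiRowMutationEndpoint coprocessor being loaded from the table descriptor of hbase:meta. A hedged sketch of how the same coprocessor would be declared on a (hypothetical) user table with the public client API:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorDescriptorSketch {
      public static void main(String[] args) throws Exception {
        // "example_table" is made up; hbase:meta carries this coprocessor in its own descriptor,
        // which is what RegionCoprocessorHost reports loading above.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td);
      }
    }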
2024-11-14T19:57:04,144 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T19:57:04,144 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T19:57:04,144 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T19:57:04,144 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T19:57:04,145 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T19:57:04,146 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T19:57:04,146 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:04,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:57:04,147 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T19:57:04,148 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T19:57:04,148 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:04,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:57:04,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T19:57:04,149 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T19:57:04,149 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:04,150 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T19:57:04,150 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T19:57:04,150 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T19:57:04,150 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T19:57:04,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
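Note: the Store=1588230740/info ... encoding=ROW_INDEX_V1 entries reflect the column family attributes of the hbase:meta 'info' family (ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks). A hedged sketch of declaring an equivalent family on a user table with the public builder API (family name and values copied from the log for illustration):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilySketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family settings printed for hbase:meta above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        System.out.println(info);
      }
    }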
2024-11-14T19:57:04,151 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T19:57:04,152 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/data/hbase/meta/1588230740 2024-11-14T19:57:04,153 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/data/hbase/meta/1588230740 2024-11-14T19:57:04,154 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T19:57:04,154 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T19:57:04,154 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T19:57:04,156 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T19:57:04,156 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725410, jitterRate=-0.07759417593479156}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T19:57:04,156 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T19:57:04,157 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731614224144Writing region info on filesystem at 1731614224144Initializing all the Stores at 1731614224145 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614224145Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614224145Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731614224145Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731614224145Cleaning up temporary data from old regions at 1731614224154 (+9 ms)Running coprocessor post-open hooks at 1731614224156 (+2 ms)Region opened successfully at 1731614224157 (+1 ms) 2024-11-14T19:57:04,157 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731614224111 2024-11-14T19:57:04,160 DEBUG [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T19:57:04,160 INFO [RS_OPEN_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T19:57:04,160 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=867b237d0fa7,38657,1731614222953 2024-11-14T19:57:04,161 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 867b237d0fa7,38657,1731614222953, state=OPEN 2024-11-14T19:57:04,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:57:04,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T19:57:04,185 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=867b237d0fa7,38657,1731614222953 2024-11-14T19:57:04,185 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:57:04,185 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T19:57:04,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T19:57:04,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=867b237d0fa7,38657,1731614222953 in 227 msec 2024-11-14T19:57:04,189 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T19:57:04,189 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 437 msec 2024-11-14T19:57:04,190 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T19:57:04,190 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T19:57:04,191 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:57:04,191 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,38657,1731614222953, seqNum=-1] 2024-11-14T19:57:04,192 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:57:04,193 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42205, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:57:04,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 920 msec 2024-11-14T19:57:04,199 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731614224199, completionTime=-1 2024-11-14T19:57:04,199 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T19:57:04,199 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T19:57:04,201 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T19:57:04,201 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731614284201 2024-11-14T19:57:04,201 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731614344201 2024-11-14T19:57:04,201 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-14T19:57:04,202 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,43539,1731614222783-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:04,202 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,43539,1731614222783-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:04,202 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,43539,1731614222783-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:04,202 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-867b237d0fa7:43539, period=300000, unit=MILLISECONDS is enabled. 
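Note: the InitMetaProcedure step above creates the 'default' and 'hbase' namespaces. A hedged sketch of listing them from a client once the cluster is up (connection configuration as in the earlier connection sketch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceListSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Expect at least the 'default' and 'hbase' namespaces created by InitMetaProcedure.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }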
2024-11-14T19:57:04,202 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:04,202 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T19:57:04,205 DEBUG [master/867b237d0fa7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T19:57:04,208 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.200sec 2024-11-14T19:57:04,208 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T19:57:04,208 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T19:57:04,208 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T19:57:04,208 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T19:57:04,208 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T19:57:04,208 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,43539,1731614222783-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T19:57:04,208 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,43539,1731614222783-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T19:57:04,210 DEBUG [master/867b237d0fa7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T19:57:04,210 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T19:57:04,211 INFO [master/867b237d0fa7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=867b237d0fa7,43539,1731614222783-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
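Note: the block above is the new active master enabling its periodic chores (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and so on), each registered with a ChoreService under a name, period and time unit. A hedged sketch of that registration pattern, with an invented chore name and a throwaway Stoppable; it assumes the ScheduledChore(String, Stoppable, int) constructor and the ChoreService.scheduleChore/shutdown methods available on this branch:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable; real chores use the master or region server itself as the stopper.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService choreService = new ChoreService("example");
    // Mirrors a line like "Chore ScheduledChore name=..., period=300000, unit=MILLISECONDS is enabled."
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 300000) {
      @Override protected void chore() {
        // periodic work runs here once per period
      }
    };
    choreService.scheduleChore(chore);
    Thread.sleep(1000);
    choreService.shutdown();   // counterpart of the "Chore service for: ... on shutdown" lines later in the log
  }
}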
2024-11-14T19:57:04,274 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46573749, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:57:04,274 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 867b237d0fa7,43539,-1 for getting cluster id 2024-11-14T19:57:04,274 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T19:57:04,279 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ecbd85ba-dc7c-404d-bc13-2ca94b814dd0' 2024-11-14T19:57:04,279 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T19:57:04,280 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ecbd85ba-dc7c-404d-bc13-2ca94b814dd0" 2024-11-14T19:57:04,280 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c0d7658, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:57:04,280 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [867b237d0fa7,43539,-1] 2024-11-14T19:57:04,280 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T19:57:04,280 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:57:04,281 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32900, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T19:57:04,282 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e9d81a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T19:57:04,283 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T19:57:04,284 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=867b237d0fa7,38657,1731614222953, seqNum=-1] 2024-11-14T19:57:04,284 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T19:57:04,285 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36580, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T19:57:04,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=867b237d0fa7,43539,1731614222783 2024-11-14T19:57:04,288 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T19:57:04,290 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T19:57:04,291 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T19:57:04,293 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/WALs/test.com,8080,1, archiveDir=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/oldWALs, maxLogs=32 2024-11-14T19:57:04,294 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731614224293 2024-11-14T19:57:04,306 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/WALs/test.com,8080,1/test.com%2C8080%2C1.1731614224293 2024-11-14T19:57:04,319 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38615:38615),(127.0.0.1/127.0.0.1:43831:43831)] 2024-11-14T19:57:04,330 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731614224330 2024-11-14T19:57:04,340 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,340 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,340 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,340 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,340 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,340 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/WALs/test.com,8080,1/test.com%2C8080%2C1.1731614224293 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/WALs/test.com,8080,1/test.com%2C8080%2C1.1731614224330 2024-11-14T19:57:04,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741835_1011 (size=93) 2024-11-14T19:57:04,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741835_1011 (size=93) 2024-11-14T19:57:04,351 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/WALs/test.com,8080,1/test.com%2C8080%2C1.1731614224293 to hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/oldWALs/test.com%2C8080%2C1.1731614224293 2024-11-14T19:57:04,355 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43831:43831),(127.0.0.1/127.0.0.1:38615:38615)] 2024-11-14T19:57:04,355 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,356 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,356 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,356 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,356 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39765 is added to blk_1073741836_1012 (size=93) 2024-11-14T19:57:04,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741836_1012 (size=93) 2024-11-14T19:57:04,360 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/oldWALs 2024-11-14T19:57:04,360 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731614224330) 2024-11-14T19:57:04,360 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T19:57:04,360 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T19:57:04,360 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:57:04,360 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:57:04,361 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:57:04,361 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T19:57:04,361 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T19:57:04,361 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1154314548, stopped=false 2024-11-14T19:57:04,361 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=867b237d0fa7,43539,1731614222783 2024-11-14T19:57:04,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T19:57:04,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T19:57:04,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:04,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:04,374 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T19:57:04,375 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
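Note: the core of this test run is a few lines back: a WALFactory is instantiated with FSHLogProvider, a WAL is created under WALs/test.com,8080,1, and it is rolled before anything is written to it (the old file is archived to oldWALs with entries=0, filesize=85 B). A minimal sketch of that call sequence, assuming the WALFactory(Configuration, String) constructor, WALFactory.getWAL(...) and WAL.rollWriter(boolean) on this branch; testUtil stands in for the already-started HBaseTestingUtil of the surrounding test class (usually held in a static TEST_UTIL field):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public final class LogRollOnNothingWrittenSketch {
  // Rolls a freshly created, still-empty WAL, which is what produces the
  // "Rolled WAL ... with entries=0" and "Moved 1 WAL file(s) to .../oldWALs" lines above.
  static void rollEmptyWal(HBaseTestingUtil testUtil) throws Exception {
    Configuration conf = testUtil.getConfiguration();
    WALFactory wals = new WALFactory(conf, ServerName.valueOf("test.com", 8080, 1).toString());
    try {
      WAL wal = wals.getWAL(null);   // assumption: null region info returns the provider's default WAL
      wal.rollWriter(true);          // force a roll even though nothing has been appended
    } finally {
      wals.close();                  // closes the FSHLog; finished files end up in oldWALs
    }
  }
}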
2024-11-14T19:57:04,375 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:57:04,375 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:57:04,375 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:57:04,375 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '867b237d0fa7,38657,1731614222953' ***** 2024-11-14T19:57:04,375 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T19:57:04,375 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T19:57:04,376 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T19:57:04,376 INFO [RS:0;867b237d0fa7:38657 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T19:57:04,376 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T19:57:04,376 INFO [RS:0;867b237d0fa7:38657 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T19:57:04,376 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(959): stopping server 867b237d0fa7,38657,1731614222953 2024-11-14T19:57:04,376 INFO [RS:0;867b237d0fa7:38657 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T19:57:04,376 INFO [RS:0;867b237d0fa7:38657 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;867b237d0fa7:38657. 2024-11-14T19:57:04,376 DEBUG [RS:0;867b237d0fa7:38657 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T19:57:04,376 DEBUG [RS:0;867b237d0fa7:38657 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:57:04,376 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T19:57:04,377 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T19:57:04,377 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
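Note: the call stacks above show where this shutdown originates: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection, asks the master to shut down, and produces the "STOPPING region server" / "stopping server" lines that follow. A hedged sketch of that JUnit hook, with class and method names taken from the stack trace and the TEST_UTIL field assumed to have been started earlier in the test class:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public abstract class TearDownSketch {
  // Assumed: started elsewhere in the test class, e.g. TEST_UTIL.startMiniCluster(1) in a setup hook.
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Drives everything from "Shutting down minicluster" through "Minicluster is down":
    // client connections, the region server, the master, then the DFS and ZooKeeper miniclusters.
    TEST_UTIL.shutdownMiniCluster();
  }
}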
2024-11-14T19:57:04,377 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T19:57:04,380 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T19:57:04,380 DEBUG [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T19:57:04,380 DEBUG [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T19:57:04,380 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T19:57:04,380 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T19:57:04,380 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T19:57:04,380 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T19:57:04,380 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T19:57:04,381 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T19:57:04,403 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/data/hbase/meta/1588230740/.tmp/ns/6bef82c29bb54e5b9b577b881ca786a6 is 43, key is default/ns:d/1731614224193/Put/seqid=0 2024-11-14T19:57:04,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741837_1013 (size=5153) 2024-11-14T19:57:04,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741837_1013 (size=5153) 2024-11-14T19:57:04,409 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/data/hbase/meta/1588230740/.tmp/ns/6bef82c29bb54e5b9b577b881ca786a6 2024-11-14T19:57:04,417 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/data/hbase/meta/1588230740/.tmp/ns/6bef82c29bb54e5b9b577b881ca786a6 as hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/data/hbase/meta/1588230740/ns/6bef82c29bb54e5b9b577b881ca786a6 2024-11-14T19:57:04,424 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/data/hbase/meta/1588230740/ns/6bef82c29bb54e5b9b577b881ca786a6, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T19:57:04,425 INFO 
[RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 45ms, sequenceid=6, compaction requested=false 2024-11-14T19:57:04,430 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T19:57:04,431 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T19:57:04,431 INFO [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T19:57:04,431 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731614224380Running coprocessor pre-close hooks at 1731614224380Disabling compacts and flushes for region at 1731614224380Disabling writes for close at 1731614224380Obtaining lock to block concurrent updates at 1731614224381 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731614224381Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731614224381Flushing stores of hbase:meta,,1.1588230740 at 1731614224382 (+1 ms)Flushing 1588230740/ns: creating writer at 1731614224382Flushing 1588230740/ns: appending metadata at 1731614224402 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731614224402Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19f708d2: reopening flushed file at 1731614224416 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 45ms, sequenceid=6, compaction requested=false at 1731614224425 (+9 ms)Writing region close event to WAL at 1731614224426 (+1 ms)Running coprocessor post-close hooks at 1731614224431 (+5 ms)Closed at 1731614224431 2024-11-14T19:57:04,431 DEBUG [RS_CLOSE_META-regionserver/867b237d0fa7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T19:57:04,442 INFO [regionserver/867b237d0fa7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T19:57:04,442 INFO [regionserver/867b237d0fa7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T19:57:04,580 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(976): stopping server 867b237d0fa7,38657,1731614222953; all regions closed. 
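Note: just above, the closing meta region flushes its only pending edit (the two rows in the ns family, ~74 B) into a small HFile and then logs a close journal with per-step timings. The same memstore flush can be requested explicitly from a test through the Admin API; a small, hedged illustration (an already-open Admin handle is assumed, and nothing in this log indicates the test itself does this):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class FlushSketch {
  // Asks the cluster to flush every region of hbase:meta, the same kind of memstore
  // flush the close path performs implicitly in the lines above.
  static void flushMeta(Admin admin) throws IOException {
    admin.flush(TableName.META_TABLE_NAME);
  }
}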
2024-11-14T19:57:04,581 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,581 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,581 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,581 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,581 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741834_1010 (size=1152) 2024-11-14T19:57:04,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741834_1010 (size=1152) 2024-11-14T19:57:04,586 DEBUG [RS:0;867b237d0fa7:38657 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/oldWALs 2024-11-14T19:57:04,586 INFO [RS:0;867b237d0fa7:38657 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C38657%2C1731614222953.meta:.meta(num 1731614224118) 2024-11-14T19:57:04,587 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,587 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,587 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,587 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,587 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741833_1009 (size=93) 2024-11-14T19:57:04,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741833_1009 (size=93) 2024-11-14T19:57:04,593 DEBUG [RS:0;867b237d0fa7:38657 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/oldWALs 2024-11-14T19:57:04,593 INFO [RS:0;867b237d0fa7:38657 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 867b237d0fa7%2C38657%2C1731614222953:(num 1731614223589) 2024-11-14T19:57:04,593 DEBUG [RS:0;867b237d0fa7:38657 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T19:57:04,593 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T19:57:04,593 INFO [RS:0;867b237d0fa7:38657 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:57:04,593 INFO [RS:0;867b237d0fa7:38657 {}] hbase.ChoreService(370): Chore service for: regionserver/867b237d0fa7:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T19:57:04,594 INFO [RS:0;867b237d0fa7:38657 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:57:04,594 INFO [regionserver/867b237d0fa7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T19:57:04,594 INFO [RS:0;867b237d0fa7:38657 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38657 2024-11-14T19:57:04,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T19:57:04,599 INFO [RS:0;867b237d0fa7:38657 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:57:04,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/867b237d0fa7,38657,1731614222953 2024-11-14T19:57:04,608 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [867b237d0fa7,38657,1731614222953] 2024-11-14T19:57:04,616 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/867b237d0fa7,38657,1731614222953 already deleted, retry=false 2024-11-14T19:57:04,616 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 867b237d0fa7,38657,1731614222953 expired; onlineServers=0 2024-11-14T19:57:04,616 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '867b237d0fa7,43539,1731614222783' ***** 2024-11-14T19:57:04,616 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T19:57:04,616 INFO [M:0;867b237d0fa7:43539 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T19:57:04,616 INFO [M:0;867b237d0fa7:43539 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T19:57:04,616 DEBUG [M:0;867b237d0fa7:43539 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T19:57:04,616 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-14T19:57:04,616 DEBUG [M:0;867b237d0fa7:43539 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T19:57:04,617 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614223307 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.large.0-1731614223307,5,FailOnTimeoutGroup] 2024-11-14T19:57:04,617 INFO [M:0;867b237d0fa7:43539 {}] hbase.ChoreService(370): Chore service for: master/867b237d0fa7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T19:57:04,617 INFO [M:0;867b237d0fa7:43539 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T19:57:04,617 DEBUG [M:0;867b237d0fa7:43539 {}] master.HMaster(1795): Stopping service threads 2024-11-14T19:57:04,617 INFO [M:0;867b237d0fa7:43539 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T19:57:04,617 INFO [M:0;867b237d0fa7:43539 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T19:57:04,617 INFO [M:0;867b237d0fa7:43539 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T19:57:04,617 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-14T19:57:04,622 DEBUG [master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614223311 {}] cleaner.HFileCleaner(306): Exit Thread[master/867b237d0fa7:0:becomeActiveMaster-HFileCleaner.small.0-1731614223311,5,FailOnTimeoutGroup] 2024-11-14T19:57:04,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T19:57:04,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T19:57:04,641 DEBUG [M:0;867b237d0fa7:43539 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-14T19:57:04,641 DEBUG [M:0;867b237d0fa7:43539 {}] master.ActiveMasterManager(353): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-14T19:57:04,642 INFO [M:0;867b237d0fa7:43539 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/.lastflushedseqids 2024-11-14T19:57:04,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741838_1014 (size=99) 2024-11-14T19:57:04,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741838_1014 (size=99) 2024-11-14T19:57:04,652 INFO [M:0;867b237d0fa7:43539 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T19:57:04,652 INFO [M:0;867b237d0fa7:43539 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T19:57:04,653 DEBUG [M:0;867b237d0fa7:43539 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T19:57:04,653 INFO [M:0;867b237d0fa7:43539 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:57:04,653 DEBUG [M:0;867b237d0fa7:43539 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T19:57:04,653 DEBUG [M:0;867b237d0fa7:43539 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T19:57:04,653 DEBUG [M:0;867b237d0fa7:43539 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T19:57:04,653 INFO [M:0;867b237d0fa7:43539 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-14T19:57:04,675 DEBUG [M:0;867b237d0fa7:43539 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc5d6a96d1cd485b9df0a6c30ce23bf5 is 82, key is hbase:meta,,1/info:regioninfo/1731614224160/Put/seqid=0 2024-11-14T19:57:04,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741839_1015 (size=5672) 2024-11-14T19:57:04,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741839_1015 (size=5672) 2024-11-14T19:57:04,681 INFO [M:0;867b237d0fa7:43539 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc5d6a96d1cd485b9df0a6c30ce23bf5 2024-11-14T19:57:04,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,38355,1731614005730/867b237d0fa7%2C38355%2C1731614005730.meta.1731614006711.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T19:57:04,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35675/user/jenkins/test-data/c6761ad3-cd52-ebde-603d-282b6badf5c1/WALs/867b237d0fa7,37273,1731614006888/867b237d0fa7%2C37273%2C1731614006888.1731614007102 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T19:57:04,704 DEBUG [M:0;867b237d0fa7:43539 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/42ae47df7ecb4a84ba8c11c4726ce6ba is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731614224198/Put/seqid=0 2024-11-14T19:57:04,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:57:04,708 INFO [RS:0;867b237d0fa7:38657 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:57:04,708 INFO [RS:0;867b237d0fa7:38657 {}] regionserver.HRegionServer(1031): Exiting; stopping=867b237d0fa7,38657,1731614222953; zookeeper connection closed. 
2024-11-14T19:57:04,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38657-0x1013c19ef4b0001, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:57:04,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741840_1016 (size=5275) 2024-11-14T19:57:04,709 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6c1579c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6c1579c 2024-11-14T19:57:04,709 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T19:57:04,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741840_1016 (size=5275) 2024-11-14T19:57:04,710 INFO [M:0;867b237d0fa7:43539 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/42ae47df7ecb4a84ba8c11c4726ce6ba 2024-11-14T19:57:04,733 DEBUG [M:0;867b237d0fa7:43539 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c04bdaae4df4b54b3be5bbb9081bd24 is 69, key is 867b237d0fa7,38657,1731614222953/rs:state/1731614223422/Put/seqid=0 2024-11-14T19:57:04,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741841_1017 (size=5156) 2024-11-14T19:57:04,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741841_1017 (size=5156) 2024-11-14T19:57:04,739 INFO [M:0;867b237d0fa7:43539 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c04bdaae4df4b54b3be5bbb9081bd24 2024-11-14T19:57:04,761 DEBUG [M:0;867b237d0fa7:43539 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f2b9730206084a4eaa8c4b20af64f74d is 52, key is load_balancer_on/state:d/1731614224290/Put/seqid=0 2024-11-14T19:57:04,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741842_1018 (size=5056) 2024-11-14T19:57:04,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741842_1018 (size=5056) 2024-11-14T19:57:04,768 INFO [M:0;867b237d0fa7:43539 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f2b9730206084a4eaa8c4b20af64f74d 2024-11-14T19:57:04,774 DEBUG [M:0;867b237d0fa7:43539 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc5d6a96d1cd485b9df0a6c30ce23bf5 as hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fc5d6a96d1cd485b9df0a6c30ce23bf5 2024-11-14T19:57:04,779 INFO [M:0;867b237d0fa7:43539 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fc5d6a96d1cd485b9df0a6c30ce23bf5, entries=8, sequenceid=29, filesize=5.5 K 2024-11-14T19:57:04,780 DEBUG [M:0;867b237d0fa7:43539 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/42ae47df7ecb4a84ba8c11c4726ce6ba as hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/42ae47df7ecb4a84ba8c11c4726ce6ba 2024-11-14T19:57:04,786 INFO [M:0;867b237d0fa7:43539 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/42ae47df7ecb4a84ba8c11c4726ce6ba, entries=3, sequenceid=29, filesize=5.2 K 2024-11-14T19:57:04,787 DEBUG [M:0;867b237d0fa7:43539 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7c04bdaae4df4b54b3be5bbb9081bd24 as hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7c04bdaae4df4b54b3be5bbb9081bd24 2024-11-14T19:57:04,792 INFO [M:0;867b237d0fa7:43539 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7c04bdaae4df4b54b3be5bbb9081bd24, entries=1, sequenceid=29, filesize=5.0 K 2024-11-14T19:57:04,793 DEBUG [M:0;867b237d0fa7:43539 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f2b9730206084a4eaa8c4b20af64f74d as hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f2b9730206084a4eaa8c4b20af64f74d 2024-11-14T19:57:04,798 INFO [M:0;867b237d0fa7:43539 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43879/user/jenkins/test-data/92744631-ba26-bed5-46b6-d44322408651/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f2b9730206084a4eaa8c4b20af64f74d, entries=1, sequenceid=29, filesize=4.9 K 2024-11-14T19:57:04,799 INFO [M:0;867b237d0fa7:43539 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false 2024-11-14T19:57:04,804 INFO [M:0;867b237d0fa7:43539 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T19:57:04,804 DEBUG [M:0;867b237d0fa7:43539 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731614224653Disabling compacts and flushes for region at 1731614224653Disabling writes for close at 1731614224653Obtaining lock to block concurrent updates at 1731614224653Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731614224653Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731614224653Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731614224654 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731614224654Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731614224674 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731614224674Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731614224686 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731614224703 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731614224703Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731614224716 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731614224733 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731614224733Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731614224743 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731614224760 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731614224760Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ad55694: reopening flushed file at 1731614224772 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cc5175a: reopening flushed file at 1731614224779 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38fa5b73: reopening flushed file at 1731614224786 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7027ce59: reopening flushed file at 1731614224792 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false at 1731614224799 (+7 ms)Writing region close event to WAL at 1731614224804 (+5 ms)Closed at 1731614224804 2024-11-14T19:57:04,804 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,804 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,804 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,804 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,805 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T19:57:04,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39765 is added to blk_1073741830_1006 (size=10311) 2024-11-14T19:57:04,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42321 is added to blk_1073741830_1006 (size=10311) 2024-11-14T19:57:05,209 INFO [M:0;867b237d0fa7:43539 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-14T19:57:05,209 INFO [M:0;867b237d0fa7:43539 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43539 2024-11-14T19:57:05,209 INFO [M:0;867b237d0fa7:43539 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T19:57:05,209 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T19:57:05,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:57:05,348 INFO [M:0;867b237d0fa7:43539 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T19:57:05,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43539-0x1013c19ef4b0000, quorum=127.0.0.1:62365, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T19:57:05,350 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f3d7578{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T19:57:05,350 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5307a3e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T19:57:05,350 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T19:57:05,350 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@769a95c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T19:57:05,350 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7914027{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/hadoop.log.dir/,STOPPED} 2024-11-14T19:57:05,352 WARN [BP-882977484-172.17.0.2-1731614221333 heartbeating to localhost/127.0.0.1:43879 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T19:57:05,352 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T19:57:05,352 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-14T19:57:05,352 WARN [BP-882977484-172.17.0.2-1731614221333 heartbeating to localhost/127.0.0.1:43879 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-882977484-172.17.0.2-1731614221333 (Datanode Uuid 7319e287-792a-469d-a597-ab5a1af11bbd) service to localhost/127.0.0.1:43879
2024-11-14T19:57:05,352 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/cluster_54a0df41-c447-f1ad-8be8-4f89a04e743f/data/data3/current/BP-882977484-172.17.0.2-1731614221333 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-14T19:57:05,353 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/cluster_54a0df41-c447-f1ad-8be8-4f89a04e743f/data/data4/current/BP-882977484-172.17.0.2-1731614221333 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-14T19:57:05,353 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-14T19:57:05,355 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77086409{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-14T19:57:05,355 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7596481a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-14T19:57:05,355 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-14T19:57:05,355 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6479a298{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-14T19:57:05,355 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d36c2e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/hadoop.log.dir/,STOPPED}
2024-11-14T19:57:05,356 WARN [BP-882977484-172.17.0.2-1731614221333 heartbeating to localhost/127.0.0.1:43879 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-14T19:57:05,356 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-14T19:57:05,356 WARN [BP-882977484-172.17.0.2-1731614221333 heartbeating to localhost/127.0.0.1:43879 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-882977484-172.17.0.2-1731614221333 (Datanode Uuid 3b956ee6-a0cc-4267-9ec8-49e085f5d269) service to localhost/127.0.0.1:43879
2024-11-14T19:57:05,356 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-14T19:57:05,357 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/cluster_54a0df41-c447-f1ad-8be8-4f89a04e743f/data/data1/current/BP-882977484-172.17.0.2-1731614221333 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-14T19:57:05,357 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/cluster_54a0df41-c447-f1ad-8be8-4f89a04e743f/data/data2/current/BP-882977484-172.17.0.2-1731614221333 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-14T19:57:05,358 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-14T19:57:05,366 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1223f548{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-14T19:57:05,367 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40cff3e3{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-14T19:57:05,367 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-14T19:57:05,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56ff1634{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-14T19:57:05,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42db1fad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/27e5752b-5184-cacf-ede6-54714929153b/hadoop.log.dir/,STOPPED}
2024-11-14T19:57:05,373 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-14T19:57:05,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-14T19:57:05,407 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=266 (was 226)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:43879
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:43879 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43879
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:43879 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (2065151629) connection to localhost/127.0.0.1:43879 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:43879
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-6
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43879
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43879
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=535 (was 506) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=206 (was 189) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5162 (was 5205)