2024-12-09 11:21:19,621 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-09 11:21:19,637 main DEBUG Took 0.013822 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 11:21:19,637 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 11:21:19,638 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 11:21:19,639 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 11:21:19,641 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:21:19,649 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 11:21:19,665 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,666 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:21:19,667 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,668 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:21:19,668 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,669 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:21:19,670 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,670 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:21:19,671 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,671 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:21:19,672 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,672 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:21:19,673 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,673 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 11:21:19,674 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,674 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:21:19,675 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,675 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:21:19,676 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,676 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:21:19,677 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,677 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:21:19,678 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,678 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 11:21:19,679 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,679 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 11:21:19,681 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 11:21:19,683 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 11:21:19,685 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 11:21:19,685 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-09 11:21:19,687 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 11:21:19,687 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 11:21:19,697 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 11:21:19,700 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 11:21:19,702 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 11:21:19,702 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 11:21:19,702 main DEBUG createAppenders(={Console}) 2024-12-09 11:21:19,703 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-12-09 11:21:19,703 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-09 11:21:19,704 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-12-09 11:21:19,704 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 11:21:19,704 main DEBUG OutputStream closed 2024-12-09 11:21:19,704 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 11:21:19,705 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 11:21:19,705 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-12-09 11:21:19,791 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 11:21:19,793 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 11:21:19,794 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 11:21:19,795 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 11:21:19,796 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 11:21:19,796 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 11:21:19,797 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 11:21:19,797 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 11:21:19,797 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 11:21:19,798 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 11:21:19,798 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 11:21:19,799 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 11:21:19,799 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 11:21:19,800 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 11:21:19,800 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 11:21:19,800 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 11:21:19,801 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 11:21:19,802 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 11:21:19,805 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 11:21:19,805 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-09 11:21:19,805 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 11:21:19,807 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-09T11:21:20,240 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005 2024-12-09 11:21:20,245 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 11:21:20,246 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
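The DEBUG entries above trace Log4j Core 2.17.2 rebuilding its configuration from the log4j2.properties bundled in hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar: per-package logger levels, a PatternLayout, a single "Console" appender (HBase's custom HBaseTestAppender writing to SYSTEM_ERR with maxSize=1G), and a root logger of INFO,Console. As a rough, hypothetical reconstruction only — written with the stock Console appender and standard Log4j 2 properties syntax instead of the HBaseTestAppender plugin the test jar actually registers — such a file could look like:

    # Appender: the real test config uses HBase's HBaseTestAppender plugin
    # (target=SYSTEM_ERR, maxSize=1G); the stock Console appender stands in here
    # so the sketch works outside the HBase test classpath.
    appender.console.type = Console
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

    # Root logger: INFO routed to the Console appender (levelAndRefs="INFO,Console" above).
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = Console

    # A few of the per-package levels listed in the createLoggers(...) entry above.
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = WARN
    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG
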
2024-12-09T11:21:20,258 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-09T11:21:20,304 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=472, ProcessCount=11, AvailableMemoryMB=3057 2024-12-09T11:21:20,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T11:21:20,334 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/cluster_3603d4f6-449d-a5b3-3a0a-a0af155f78f5, deleteOnExit=true 2024-12-09T11:21:20,334 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T11:21:20,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/test.cache.data in system properties and HBase conf 2024-12-09T11:21:20,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T11:21:20,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/hadoop.log.dir in system properties and HBase conf 2024-12-09T11:21:20,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T11:21:20,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T11:21:20,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T11:21:20,501 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T11:21:20,670 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T11:21:20,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:21:20,676 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:21:20,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T11:21:20,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:21:20,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T11:21:20,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T11:21:20,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:21:20,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:21:20,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T11:21:20,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/nfs.dump.dir in system properties and HBase conf 2024-12-09T11:21:20,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/java.io.tmpdir in system properties and HBase conf 2024-12-09T11:21:20,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:21:20,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T11:21:20,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T11:21:21,271 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:21:21,847 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T11:21:21,984 INFO [Time-limited test {}] log.Log(170): Logging initialized @3256ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T11:21:22,092 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:21:22,186 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:21:22,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:21:22,212 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:21:22,213 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:21:22,254 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:21:22,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75bdea07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:21:22,276 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@455f3457{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:21:22,601 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f961078{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/java.io.tmpdir/jetty-localhost-32953-hadoop-hdfs-3_4_1-tests_jar-_-any-3123705574370043441/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:21:22,611 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25dfddc5{HTTP/1.1, (http/1.1)}{localhost:32953} 2024-12-09T11:21:22,612 INFO [Time-limited test {}] server.Server(415): Started @3885ms 2024-12-09T11:21:22,661 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:21:23,303 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:21:23,312 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:21:23,317 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:21:23,317 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:21:23,318 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:21:23,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@616d254c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:21:23,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@198fe7a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:21:23,484 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@32c41a8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/java.io.tmpdir/jetty-localhost-43783-hadoop-hdfs-3_4_1-tests_jar-_-any-6007210773070598225/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:21:23,485 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21c64e78{HTTP/1.1, (http/1.1)}{localhost:43783} 2024-12-09T11:21:23,486 INFO [Time-limited test {}] server.Server(415): Started @4759ms 2024-12-09T11:21:23,562 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:21:23,735 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:21:23,745 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:21:23,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:21:23,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:21:23,753 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:21:23,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1612a852{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:21:23,756 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e06ea5e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:21:23,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78be0d39{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/java.io.tmpdir/jetty-localhost-42399-hadoop-hdfs-3_4_1-tests_jar-_-any-1091851010243672210/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:21:23,920 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@617aa169{HTTP/1.1, (http/1.1)}{localhost:42399} 2024-12-09T11:21:23,921 INFO [Time-limited test {}] server.Server(415): Started @5194ms 2024-12-09T11:21:23,924 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
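At this point the harness has brought up a two-datanode mini HDFS cluster plus the embedded Jetty HTTP endpoints for the namenode (port 32953) and both datanodes (ports 43783 and 42399), following the StartMiniClusterOption logged earlier (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1). A minimal sketch of how a test typically requests that topology, assuming the HBase 3.x testing API names that appear in this log (HBaseTestingUtil, StartMiniClusterOption) and not the actual TestLogRolling source:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Same topology as the StartMiniClusterOption logged above:
        // 1 master, 1 region server, 2 HDFS datanodes, 1 ZooKeeper server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option); // starts DFS, ZooKeeper and HBase, as traced in this log
        try {
          // ... run test logic against util.getConnection() / util.getAdmin() ...
        } finally {
          util.shutdownMiniCluster(); // tears everything down and removes the temp data dirs
        }
      }
    }
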
2024-12-09T11:21:24,162 WARN [Thread-94 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/cluster_3603d4f6-449d-a5b3-3a0a-a0af155f78f5/data/data1/current/BP-1173096048-172.17.0.3-1733743281430/current, will proceed with Du for space computation calculation, 2024-12-09T11:21:24,163 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/cluster_3603d4f6-449d-a5b3-3a0a-a0af155f78f5/data/data2/current/BP-1173096048-172.17.0.3-1733743281430/current, will proceed with Du for space computation calculation, 2024-12-09T11:21:24,169 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/cluster_3603d4f6-449d-a5b3-3a0a-a0af155f78f5/data/data3/current/BP-1173096048-172.17.0.3-1733743281430/current, will proceed with Du for space computation calculation, 2024-12-09T11:21:24,170 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/cluster_3603d4f6-449d-a5b3-3a0a-a0af155f78f5/data/data4/current/BP-1173096048-172.17.0.3-1733743281430/current, will proceed with Du for space computation calculation, 2024-12-09T11:21:24,260 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T11:21:24,305 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:21:24,366 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x550057cf318fce3f with lease ID 0xaec7c1e4e09927a8: Processing first storage report for DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6 from datanode DatanodeRegistration(127.0.0.1:42561, datanodeUuid=80241328-4d93-4f1d-b318-dc9112f4d4bb, infoPort=39453, infoSecurePort=0, ipcPort=42007, storageInfo=lv=-57;cid=testClusterID;nsid=749984916;c=1733743281430) 2024-12-09T11:21:24,368 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x550057cf318fce3f with lease ID 0xaec7c1e4e09927a8: from storage DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6 node DatanodeRegistration(127.0.0.1:42561, datanodeUuid=80241328-4d93-4f1d-b318-dc9112f4d4bb, infoPort=39453, infoSecurePort=0, ipcPort=42007, storageInfo=lv=-57;cid=testClusterID;nsid=749984916;c=1733743281430), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-09T11:21:24,368 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3fa4d6858152dc9a with lease ID 0xaec7c1e4e09927a9: Processing first storage report for DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195 from datanode DatanodeRegistration(127.0.0.1:38779, datanodeUuid=ffc48d17-9d93-4924-a708-4450516f13ee, infoPort=37551, infoSecurePort=0, ipcPort=40635, storageInfo=lv=-57;cid=testClusterID;nsid=749984916;c=1733743281430) 2024-12-09T11:21:24,369 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3fa4d6858152dc9a with lease ID 0xaec7c1e4e09927a9: from storage DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195 node DatanodeRegistration(127.0.0.1:38779, datanodeUuid=ffc48d17-9d93-4924-a708-4450516f13ee, infoPort=37551, infoSecurePort=0, ipcPort=40635, storageInfo=lv=-57;cid=testClusterID;nsid=749984916;c=1733743281430), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T11:21:24,369 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x550057cf318fce3f with lease ID 0xaec7c1e4e09927a8: Processing first storage report for DS-e3ece8ac-7cad-42cb-b943-e26fee758181 from datanode DatanodeRegistration(127.0.0.1:42561, datanodeUuid=80241328-4d93-4f1d-b318-dc9112f4d4bb, infoPort=39453, infoSecurePort=0, ipcPort=42007, storageInfo=lv=-57;cid=testClusterID;nsid=749984916;c=1733743281430) 2024-12-09T11:21:24,369 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x550057cf318fce3f with lease ID 0xaec7c1e4e09927a8: from storage DS-e3ece8ac-7cad-42cb-b943-e26fee758181 node DatanodeRegistration(127.0.0.1:42561, datanodeUuid=80241328-4d93-4f1d-b318-dc9112f4d4bb, infoPort=39453, infoSecurePort=0, ipcPort=42007, storageInfo=lv=-57;cid=testClusterID;nsid=749984916;c=1733743281430), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:21:24,370 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3fa4d6858152dc9a with lease ID 0xaec7c1e4e09927a9: Processing first storage report for DS-722281b3-cb4a-446a-a60b-eaa77ebc23fa from datanode DatanodeRegistration(127.0.0.1:38779, datanodeUuid=ffc48d17-9d93-4924-a708-4450516f13ee, infoPort=37551, infoSecurePort=0, ipcPort=40635, storageInfo=lv=-57;cid=testClusterID;nsid=749984916;c=1733743281430) 2024-12-09T11:21:24,370 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x3fa4d6858152dc9a with lease ID 0xaec7c1e4e09927a9: from storage DS-722281b3-cb4a-446a-a60b-eaa77ebc23fa node DatanodeRegistration(127.0.0.1:38779, datanodeUuid=ffc48d17-9d93-4924-a708-4450516f13ee, infoPort=37551, infoSecurePort=0, ipcPort=40635, storageInfo=lv=-57;cid=testClusterID;nsid=749984916;c=1733743281430), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:21:24,530 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005 2024-12-09T11:21:24,669 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/cluster_3603d4f6-449d-a5b3-3a0a-a0af155f78f5/zookeeper_0, clientPort=61679, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/cluster_3603d4f6-449d-a5b3-3a0a-a0af155f78f5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/cluster_3603d4f6-449d-a5b3-3a0a-a0af155f78f5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T11:21:24,681 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61679 2024-12-09T11:21:24,698 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:21:24,702 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:21:25,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:21:25,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:21:25,468 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561 with version=8 2024-12-09T11:21:25,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/hbase-staging 2024-12-09T11:21:25,567 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T11:21:25,889 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:21:25,905 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:21:25,906 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:21:25,913 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:21:25,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:21:25,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:21:26,094 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T11:21:26,176 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T11:21:26,188 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T11:21:26,193 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:21:26,218 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 28024 (auto-detected) 2024-12-09T11:21:26,220 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-12-09T11:21:26,239 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35265 2024-12-09T11:21:26,261 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35265 connecting to ZooKeeper ensemble=127.0.0.1:61679 2024-12-09T11:21:26,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:352650x0, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:21:26,302 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35265-0x1012aeaa1ad0000 connected 2024-12-09T11:21:26,333 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:21:26,336 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:21:26,355 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:21:26,360 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561, hbase.cluster.distributed=false 2024-12-09T11:21:26,393 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:21:26,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35265 2024-12-09T11:21:26,401 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35265 2024-12-09T11:21:26,406 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35265 2024-12-09T11:21:26,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35265 2024-12-09T11:21:26,408 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35265 2024-12-09T11:21:26,557 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:21:26,559 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:21:26,559 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:21:26,560 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:21:26,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:21:26,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:21:26,563 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T11:21:26,566 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:21:26,567 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45837 2024-12-09T11:21:26,569 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45837 connecting to ZooKeeper ensemble=127.0.0.1:61679 2024-12-09T11:21:26,571 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:21:26,576 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:21:26,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458370x0, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:21:26,587 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45837-0x1012aeaa1ad0001 connected 2024-12-09T11:21:26,587 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:21:26,593 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T11:21:26,602 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T11:21:26,605 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T11:21:26,610 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:21:26,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45837 2024-12-09T11:21:26,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45837 2024-12-09T11:21:26,613 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45837 2024-12-09T11:21:26,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45837 2024-12-09T11:21:26,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45837 2024-12-09T11:21:26,631 DEBUG [M:0;2dff3a36d44f:35265 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2dff3a36d44f:35265 2024-12-09T11:21:26,632 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2dff3a36d44f,35265,1733743285620 2024-12-09T11:21:26,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:21:26,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:21:26,642 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2dff3a36d44f,35265,1733743285620 2024-12-09T11:21:26,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T11:21:26,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:26,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-09T11:21:26,677 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T11:21:26,679 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2dff3a36d44f,35265,1733743285620 from backup master directory 2024-12-09T11:21:26,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:21:26,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2dff3a36d44f,35265,1733743285620 2024-12-09T11:21:26,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:21:26,683 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T11:21:26,684 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2dff3a36d44f,35265,1733743285620 2024-12-09T11:21:26,686 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T11:21:26,687 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T11:21:26,757 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/hbase.id] with ID: 95584051-85e0-4a54-a7bc-4c4fa87dc41e 2024-12-09T11:21:26,757 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/.tmp/hbase.id 2024-12-09T11:21:26,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:21:26,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:21:26,774 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/.tmp/hbase.id]:[hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/hbase.id] 2024-12-09T11:21:26,858 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:21:26,863 INFO 
[master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T11:21:26,890 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 24ms. 2024-12-09T11:21:26,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:26,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:26,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:21:26,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:21:26,934 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:21:26,936 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T11:21:26,944 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:21:27,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:21:27,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:21:27,437 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store 2024-12-09T11:21:27,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:21:27,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:21:27,876 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-09T11:21:27,880 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:27,882 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:21:27,882 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:21:27,882 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:21:27,884 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:21:27,885 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T11:21:27,885 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:21:27,886 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743287882Disabling compacts and flushes for region at 1733743287882Disabling writes for close at 1733743287884 (+2 ms)Writing region close event to WAL at 1733743287885 (+1 ms)Closed at 1733743287885 2024-12-09T11:21:27,889 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/.initializing 2024-12-09T11:21:27,890 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/WALs/2dff3a36d44f,35265,1733743285620 2024-12-09T11:21:27,917 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C35265%2C1733743285620, suffix=, logDir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/WALs/2dff3a36d44f,35265,1733743285620, archiveDir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/oldWALs, maxLogs=10 2024-12-09T11:21:27,928 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C35265%2C1733743285620.1733743287923 2024-12-09T11:21:27,956 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/WALs/2dff3a36d44f,35265,1733743285620/2dff3a36d44f%2C35265%2C1733743285620.1733743287923 2024-12-09T11:21:27,969 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37551:37551),(127.0.0.1/127.0.0.1:39453:39453)] 2024-12-09T11:21:27,972 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:21:27,973 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:27,978 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:21:27,979 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:21:28,039 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:21:28,075 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T11:21:28,080 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:28,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:21:28,084 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:21:28,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T11:21:28,088 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:28,090 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:28,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:21:28,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T11:21:28,099 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:28,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:28,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:21:28,107 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T11:21:28,107 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:28,108 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:28,109 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:21:28,115 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:21:28,117 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:21:28,130 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:21:28,131 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:21:28,136 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T11:21:28,143 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:21:28,166 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:21:28,169 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689153, jitterRate=-0.12369722127914429}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T11:21:28,178 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733743287998Initializing all the Stores at 1733743288005 (+7 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743288005Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743288006 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743288007 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743288007Cleaning up temporary data from old regions at 1733743288131 (+124 ms)Region opened successfully at 1733743288178 (+47 ms) 2024-12-09T11:21:28,182 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T11:21:28,249 DEBUG 
[master/2dff3a36d44f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b952cbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:21:28,294 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T11:21:28,310 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T11:21:28,311 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T11:21:28,314 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T11:21:28,315 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T11:21:28,323 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-12-09T11:21:28,323 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T11:21:28,367 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T11:21:28,385 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T11:21:28,394 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T11:21:28,398 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T11:21:28,400 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T11:21:28,407 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T11:21:28,409 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T11:21:28,414 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T11:21:28,419 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T11:21:28,420 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T11:21:28,422 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T11:21:28,458 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T11:21:28,463 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T11:21:28,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:21:28,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:28,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:21:28,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:28,474 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2dff3a36d44f,35265,1733743285620, sessionid=0x1012aeaa1ad0000, setting cluster-up flag (Was=false) 2024-12-09T11:21:28,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:28,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:28,519 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T11:21:28,527 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,35265,1733743285620 2024-12-09T11:21:28,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:28,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:28,551 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T11:21:28,558 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,35265,1733743285620 2024-12-09T11:21:28,583 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T11:21:28,623 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(746): ClusterId : 95584051-85e0-4a54-a7bc-4c4fa87dc41e 2024-12-09T11:21:28,626 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T11:21:28,632 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T11:21:28,632 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T11:21:28,639 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T11:21:28,642 DEBUG [RS:0;2dff3a36d44f:45837 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@408b39b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:21:28,676 DEBUG [RS:0;2dff3a36d44f:45837 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2dff3a36d44f:45837 2024-12-09T11:21:28,680 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T11:21:28,681 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T11:21:28,681 DEBUG [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T11:21:28,685 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,35265,1733743285620 with port=45837, startcode=1733743286499 2024-12-09T11:21:28,691 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T11:21:28,700 DEBUG [RS:0;2dff3a36d44f:45837 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T11:21:28,709 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T11:21:28,737 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T11:21:28,754 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2dff3a36d44f,35265,1733743285620 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T11:21:28,770 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:21:28,771 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:21:28,771 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:21:28,771 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:21:28,771 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2dff3a36d44f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T11:21:28,772 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:28,772 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:21:28,772 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:28,798 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:21:28,799 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733743318799 2024-12-09T11:21:28,801 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T11:21:28,803 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T11:21:28,804 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T11:21:28,810 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T11:21:28,810 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T11:21:28,811 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T11:21:28,811 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T11:21:28,818 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:28,830 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:28,831 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T11:21:28,831 INFO [PEWorker-2 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => 
'0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T11:21:28,832 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T11:21:28,833 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T11:21:28,837 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42337, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T11:21:28,841 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T11:21:28,842 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T11:21:28,846 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35265 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T11:21:28,863 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743288843,5,FailOnTimeoutGroup] 2024-12-09T11:21:28,864 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743288863,5,FailOnTimeoutGroup] 2024-12-09T11:21:28,864 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:28,864 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T11:21:28,866 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:28,866 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T11:21:28,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:21:28,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:21:28,879 INFO [PEWorker-2 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T11:21:28,880 INFO [PEWorker-2 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561 2024-12-09T11:21:28,884 DEBUG [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-09T11:21:28,884 WARN [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-09T11:21:28,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:21:28,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:21:28,897 DEBUG [PEWorker-2 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:28,899 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:21:28,902 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:21:28,902 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:28,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:21:28,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:21:28,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:21:28,908 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:28,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:21:28,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:21:28,913 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:21:28,914 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:28,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:21:28,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:21:28,917 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:21:28,918 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:28,919 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:21:28,919 DEBUG [PEWorker-2 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:21:28,920 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740 2024-12-09T11:21:28,921 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740 2024-12-09T11:21:28,928 DEBUG [PEWorker-2 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:21:28,928 DEBUG [PEWorker-2 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:21:28,929 DEBUG [PEWorker-2 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T11:21:28,932 DEBUG [PEWorker-2 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:21:28,937 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:21:28,939 INFO [PEWorker-2 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769266, jitterRate=-0.0218278169631958}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:21:28,942 DEBUG [PEWorker-2 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733743288897Initializing all the Stores at 1733743288899 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743288899Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743288899Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743288899Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743288899Cleaning up temporary data from old regions at 1733743288928 (+29 ms)Region opened successfully at 1733743288942 (+14 ms) 2024-12-09T11:21:28,942 DEBUG [PEWorker-2 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:21:28,942 INFO [PEWorker-2 {}] regionserver.HRegion(1755): Closing 
region hbase:meta,,1.1588230740 2024-12-09T11:21:28,942 DEBUG [PEWorker-2 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:21:28,943 DEBUG [PEWorker-2 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:21:28,943 DEBUG [PEWorker-2 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:21:28,944 INFO [PEWorker-2 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:21:28,945 DEBUG [PEWorker-2 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743288942Disabling compacts and flushes for region at 1733743288942Disabling writes for close at 1733743288943 (+1 ms)Writing region close event to WAL at 1733743288944 (+1 ms)Closed at 1733743288944 2024-12-09T11:21:28,949 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:21:28,949 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T11:21:28,958 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T11:21:28,969 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:21:28,979 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T11:21:28,985 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,35265,1733743285620 with port=45837, startcode=1733743286499 2024-12-09T11:21:28,991 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35265 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2dff3a36d44f,45837,1733743286499 2024-12-09T11:21:28,994 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35265 {}] master.ServerManager(517): Registering regionserver=2dff3a36d44f,45837,1733743286499 2024-12-09T11:21:29,006 DEBUG [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561 2024-12-09T11:21:29,006 DEBUG [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38079 2024-12-09T11:21:29,006 DEBUG [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T11:21:29,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:21:29,013 DEBUG [RS:0;2dff3a36d44f:45837 {}] zookeeper.ZKUtil(111): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Set watcher on existing 
znode=/hbase/rs/2dff3a36d44f,45837,1733743286499 2024-12-09T11:21:29,013 WARN [RS:0;2dff3a36d44f:45837 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T11:21:29,013 INFO [RS:0;2dff3a36d44f:45837 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:21:29,013 DEBUG [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499 2024-12-09T11:21:29,014 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2dff3a36d44f,45837,1733743286499] 2024-12-09T11:21:29,043 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T11:21:29,057 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T11:21:29,062 INFO [RS:0;2dff3a36d44f:45837 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:21:29,062 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,063 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T11:21:29,068 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T11:21:29,070 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T11:21:29,070 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:29,070 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:29,070 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:29,071 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:29,071 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:29,072 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:21:29,072 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:29,072 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:29,072 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:29,072 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:29,072 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:29,072 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:21:29,073 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:21:29,073 DEBUG [RS:0;2dff3a36d44f:45837 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:21:29,079 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,079 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,079 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,079 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T11:21:29,080 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,080 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,45837,1733743286499-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:21:29,110 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T11:21:29,112 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,45837,1733743286499-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,112 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,113 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.Replication(171): 2dff3a36d44f,45837,1733743286499 started 2024-12-09T11:21:29,131 WARN [2dff3a36d44f:35265 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T11:21:29,142 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,143 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(1482): Serving as 2dff3a36d44f,45837,1733743286499, RpcServer on 2dff3a36d44f/172.17.0.3:45837, sessionid=0x1012aeaa1ad0001 2024-12-09T11:21:29,144 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T11:21:29,144 DEBUG [RS:0;2dff3a36d44f:45837 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2dff3a36d44f,45837,1733743286499 2024-12-09T11:21:29,144 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,45837,1733743286499' 2024-12-09T11:21:29,144 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T11:21:29,152 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T11:21:29,153 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T11:21:29,153 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T11:21:29,154 DEBUG [RS:0;2dff3a36d44f:45837 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2dff3a36d44f,45837,1733743286499 2024-12-09T11:21:29,154 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,45837,1733743286499' 2024-12-09T11:21:29,154 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T11:21:29,155 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T11:21:29,156 DEBUG [RS:0;2dff3a36d44f:45837 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T11:21:29,157 INFO [RS:0;2dff3a36d44f:45837 {}] 
quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T11:21:29,157 INFO [RS:0;2dff3a36d44f:45837 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T11:21:29,268 INFO [RS:0;2dff3a36d44f:45837 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C45837%2C1733743286499, suffix=, logDir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499, archiveDir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/oldWALs, maxLogs=32 2024-12-09T11:21:29,273 INFO [RS:0;2dff3a36d44f:45837 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C45837%2C1733743286499.1733743289271 2024-12-09T11:21:29,312 INFO [RS:0;2dff3a36d44f:45837 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743289271 2024-12-09T11:21:29,336 DEBUG [RS:0;2dff3a36d44f:45837 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39453:39453),(127.0.0.1/127.0.0.1:37551:37551)] 2024-12-09T11:21:29,384 DEBUG [2dff3a36d44f:35265 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T11:21:29,404 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2dff3a36d44f,45837,1733743286499 2024-12-09T11:21:29,419 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,45837,1733743286499, state=OPENING 2024-12-09T11:21:29,431 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T11:21:29,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:29,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:21:29,447 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:21:29,451 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:21:29,453 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:21:29,455 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,45837,1733743286499}] 2024-12-09T11:21:29,636 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:21:29,641 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.3:53007, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:21:29,672 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T11:21:29,673 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:21:29,684 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C45837%2C1733743286499.meta, suffix=.meta, logDir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499, archiveDir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/oldWALs, maxLogs=32 2024-12-09T11:21:29,686 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C45837%2C1733743286499.meta.1733743289686.meta 2024-12-09T11:21:29,719 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.meta.1733743289686.meta 2024-12-09T11:21:29,722 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39453:39453),(127.0.0.1/127.0.0.1:37551:37551)] 2024-12-09T11:21:29,725 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:21:29,727 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T11:21:29,730 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T11:21:29,736 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
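The two AbstractFSWAL(613) entries above show both the region server WAL and the hbase:meta WAL being created through FSHLogProvider with blocksize=256 MB and rollsize=128 MB. A minimal configuration sketch of the settings that typically produce those figures, written against the standard Configuration API; the key names are recalled from hbase-default and should be treated as assumptions, not values read from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names: select the FSHLog-backed provider and shape the
        // blocksize/rollsize values reported by AbstractFSWAL(613).
        conf.set("hbase.wal.provider", "filesystem");                 // -> FSHLogProvider
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f); // rollsize = blocksize * 0.5 = 128 MB
        System.out.println("WAL blocksize=" + conf.getLong("hbase.regionserver.hlog.blocksize", -1));
      }
    }
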
2024-12-09T11:21:29,742 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T11:21:29,743 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:29,743 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T11:21:29,743 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T11:21:29,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:21:29,753 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:21:29,753 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:29,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:21:29,755 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:21:29,756 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:21:29,756 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:29,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:21:29,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:21:29,769 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:21:29,769 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:29,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:21:29,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:21:29,773 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:21:29,773 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:29,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T11:21:29,775 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:21:29,777 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740 2024-12-09T11:21:29,782 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740 2024-12-09T11:21:29,784 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:21:29,784 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:21:29,785 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T11:21:29,788 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:21:29,789 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824203, jitterRate=0.048028767108917236}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:21:29,790 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T11:21:29,791 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733743289744Writing region info on filesystem at 1733743289744Initializing all the Stores at 1733743289749 (+5 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743289749Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743289750 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743289750Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743289750Cleaning up temporary data from old regions at 1733743289784 (+34 ms)Running coprocessor post-open hooks at 1733743289790 (+6 ms)Region opened successfully at 1733743289791 (+1 ms) 2024-12-09T11:21:29,800 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733743289626 2024-12-09T11:21:29,812 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T11:21:29,812 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T11:21:29,814 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2dff3a36d44f,45837,1733743286499 2024-12-09T11:21:29,820 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,45837,1733743286499, state=OPEN 2024-12-09T11:21:29,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:21:29,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:21:29,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:21:29,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:21:29,831 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2dff3a36d44f,45837,1733743286499 2024-12-09T11:21:29,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T11:21:29,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,45837,1733743286499 in 376 msec 2024-12-09T11:21:29,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T11:21:29,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 885 msec 2024-12-09T11:21:29,851 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:21:29,851 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T11:21:29,873 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:21:29,875 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,45837,1733743286499, seqNum=-1] 2024-12-09T11:21:29,899 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:21:29,903 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38625, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:21:29,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2940 sec 2024-12-09T11:21:29,929 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733743289929, completionTime=-1 2024-12-09T11:21:29,933 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T11:21:29,933 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T11:21:29,968 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T11:21:29,968 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733743349968 2024-12-09T11:21:29,968 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733743409968 2024-12-09T11:21:29,968 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 35 msec 2024-12-09T11:21:29,972 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,35265,1733743285620-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,973 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,35265,1733743285620-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,973 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,35265,1733743285620-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,975 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2dff3a36d44f:35265, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T11:21:29,975 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,976 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T11:21:29,989 DEBUG [master/2dff3a36d44f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T11:21:30,019 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.334sec 2024-12-09T11:21:30,020 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T11:21:30,022 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T11:21:30,023 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T11:21:30,024 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T11:21:30,026 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T11:21:30,027 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,35265,1733743285620-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:21:30,028 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,35265,1733743285620-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T11:21:30,038 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T11:21:30,038 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5901b87c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:21:30,039 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T11:21:30,040 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,35265,1733743285620-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
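At this point HMaster(1239) reports "Master has completed initialization" and, a few entries further on, HBaseTestingUtil(877) declares the minicluster up. A minimal sketch of driving such a single-RegionServer minicluster from a test, assuming the startMiniCluster()/shutdownMiniCluster() helpers carried over from HBaseTestingUtility; the small region and memstore limits mirror the values the TableDescriptorChecker warns about later in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration();
        // Deliberately tiny limits so flushing and WAL rolling happen quickly;
        // the TableDescriptorChecker warnings below show comparable values in use.
        conf.setLong("hbase.hregion.max.filesize", 786432L);
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
        util.startMiniCluster();          // one master, one region server by default
        try {
          // ... exercise the cluster through util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
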
2024-12-09T11:21:30,041 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T11:21:30,041 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T11:21:30,044 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2dff3a36d44f,35265,-1 for getting cluster id 2024-12-09T11:21:30,047 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:21:30,057 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '95584051-85e0-4a54-a7bc-4c4fa87dc41e' 2024-12-09T11:21:30,060 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:21:30,061 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "95584051-85e0-4a54-a7bc-4c4fa87dc41e" 2024-12-09T11:21:30,061 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71d0f01d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:21:30,061 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2dff3a36d44f,35265,-1] 2024-12-09T11:21:30,065 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:21:30,077 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:21:30,079 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50654, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:21:30,084 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@526567d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:21:30,084 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:21:30,100 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,45837,1733743286499, seqNum=-1] 2024-12-09T11:21:30,101 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:21:30,109 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42524, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:21:30,151 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2dff3a36d44f,35265,1733743285620 2024-12-09T11:21:30,152 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:21:30,163 INFO [Time-limited 
test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T11:21:30,168 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T11:21:30,175 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 2dff3a36d44f,35265,1733743285620 2024-12-09T11:21:30,179 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@485738bd 2024-12-09T11:21:30,180 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T11:21:30,183 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50670, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T11:21:30,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35265 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T11:21:30,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35265 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-09T11:21:30,192 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35265 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:21:30,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35265 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-09T11:21:30,223 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:21:30,226 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35265 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-09T11:21:30,226 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:30,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35265 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:21:30,234 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:21:30,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42561 is added to blk_1073741835_1011 (size=389) 2024-12-09T11:21:30,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741835_1011 (size=389) 2024-12-09T11:21:30,294 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cd7b5327491d9801c9e0c6519cb49c36, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561 2024-12-09T11:21:30,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741836_1012 (size=72) 2024-12-09T11:21:30,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741836_1012 (size=72) 2024-12-09T11:21:30,723 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:30,724 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing cd7b5327491d9801c9e0c6519cb49c36, disabling compactions & flushes 2024-12-09T11:21:30,724 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 2024-12-09T11:21:30,724 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 2024-12-09T11:21:30,724 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. after waiting 0 ms 2024-12-09T11:21:30,724 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 2024-12-09T11:21:30,724 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 
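The HMaster$4(2454) and CreateTableProcedure entries above record the creation of 'TestLogRolling-testSlowSyncLogRolling' with a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW'). A minimal client-side sketch of an equivalent request through the standard Admin API; the class and variable names are illustrative, and the tiny size limits echo the 786432/8192 values flagged by TableDescriptorChecker(321) above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
          admin.createTable(TableDescriptorBuilder.newBuilder(name)
              // Deliberately small limits, matching the values the
              // TableDescriptorChecker warned about above (786432 / 8192 bytes).
              .setMaxFileSize(786432L)
              .setMemStoreFlushSize(8192L)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                    // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW)    // BLOOMFILTER => 'ROW'
                  .build())
              .build());
        }
      }
    }
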
2024-12-09T11:21:30,724 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for cd7b5327491d9801c9e0c6519cb49c36: Waiting for close lock at 1733743290724Disabling compacts and flushes for region at 1733743290724Disabling writes for close at 1733743290724Writing region close event to WAL at 1733743290724Closed at 1733743290724 2024-12-09T11:21:30,727 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:21:30,732 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733743290727"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733743290727"}]},"ts":"1733743290727"} 2024-12-09T11:21:30,740 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T11:21:30,743 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:21:30,746 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733743290743"}]},"ts":"1733743290743"} 2024-12-09T11:21:30,755 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-09T11:21:30,758 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=cd7b5327491d9801c9e0c6519cb49c36, ASSIGN}] 2024-12-09T11:21:30,763 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=cd7b5327491d9801c9e0c6519cb49c36, ASSIGN 2024-12-09T11:21:30,765 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=cd7b5327491d9801c9e0c6519cb49c36, ASSIGN; state=OFFLINE, location=2dff3a36d44f,45837,1733743286499; forceNewPlan=false, retain=false 2024-12-09T11:21:30,917 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cd7b5327491d9801c9e0c6519cb49c36, regionState=OPENING, regionLocation=2dff3a36d44f,45837,1733743286499 2024-12-09T11:21:30,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=cd7b5327491d9801c9e0c6519cb49c36, ASSIGN because future has completed 2024-12-09T11:21:30,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, 
state=RUNNABLE, hasLock=false; OpenRegionProcedure cd7b5327491d9801c9e0c6519cb49c36, server=2dff3a36d44f,45837,1733743286499}] 2024-12-09T11:21:31,092 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 2024-12-09T11:21:31,092 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => cd7b5327491d9801c9e0c6519cb49c36, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:21:31,093 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:21:31,093 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:21:31,093 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:21:31,093 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:21:31,099 INFO [StoreOpener-cd7b5327491d9801c9e0c6519cb49c36-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:21:31,103 INFO [StoreOpener-cd7b5327491d9801c9e0c6519cb49c36-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd7b5327491d9801c9e0c6519cb49c36 columnFamilyName info 2024-12-09T11:21:31,103 DEBUG [StoreOpener-cd7b5327491d9801c9e0c6519cb49c36-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:21:31,105 INFO [StoreOpener-cd7b5327491d9801c9e0c6519cb49c36-1 {}] regionserver.HStore(327): Store=cd7b5327491d9801c9e0c6519cb49c36/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:21:31,105 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(1038): replaying wal for cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:21:31,107 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:21:31,107 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:21:31,108 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:21:31,108 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:21:31,114 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:21:31,133 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:21:31,135 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened cd7b5327491d9801c9e0c6519cb49c36; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=820714, jitterRate=0.043593019247055054}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:21:31,135 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:21:31,137 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for cd7b5327491d9801c9e0c6519cb49c36: Running coprocessor pre-open hook at 1733743291093Writing region info on filesystem at 1733743291093Initializing all the Stores at 1733743291095 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743291095Cleaning up temporary data from old regions at 1733743291108 (+13 ms)Running coprocessor post-open hooks at 1733743291136 (+28 ms)Region opened successfully at 1733743291137 (+1 ms) 2024-12-09T11:21:31,140 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36., pid=6, 
masterSystemTime=1733743291082 2024-12-09T11:21:31,146 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cd7b5327491d9801c9e0c6519cb49c36, regionState=OPEN, openSeqNum=2, regionLocation=2dff3a36d44f,45837,1733743286499 2024-12-09T11:21:31,151 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 2024-12-09T11:21:31,151 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 2024-12-09T11:21:31,152 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd7b5327491d9801c9e0c6519cb49c36, server=2dff3a36d44f,45837,1733743286499 because future has completed 2024-12-09T11:21:31,163 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T11:21:31,163 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure cd7b5327491d9801c9e0c6519cb49c36, server=2dff3a36d44f,45837,1733743286499 in 231 msec 2024-12-09T11:21:31,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T11:21:31,170 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=cd7b5327491d9801c9e0c6519cb49c36, ASSIGN in 405 msec 2024-12-09T11:21:31,172 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:21:31,172 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733743291172"}]},"ts":"1733743291172"} 2024-12-09T11:21:31,181 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-09T11:21:31,186 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:21:31,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 990 msec 2024-12-09T11:21:35,256 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T11:21:35,313 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T11:21:35,315 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-09T11:21:36,171 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T11:21:36,172 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T11:21:36,174 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-09T11:21:36,174 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T11:21:36,176 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:21:36,176 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T11:21:36,176 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T11:21:36,176 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T11:21:40,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35265 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:21:40,278 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-09T11:21:40,282 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-09T11:21:40,290 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-09T11:21:40,291 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 
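With the CREATE operation reported complete and the table's single region located, the entries that follow show the test writing cells such as row0001/info: into the new table before rolling the WAL. A minimal sketch of that write path with the standard client API; the row count, empty qualifier, and ~1 KB payload are illustrative assumptions rather than values taken from this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteRowsSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name)) {
          for (int i = 1; i <= 10; i++) {
            // Keys like row0001 match the cell key visible in the later flush entry.
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(Bytes.toBytes("info"), new byte[0], new byte[1024]); // empty qualifier, ~1 KB value
            table.put(put);
          }
        }
      }
    }
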
2024-12-09T11:21:40,293 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C45837%2C1733743286499.1733743300293 2024-12-09T11:21:40,304 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:21:40,304 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:21:40,305 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:21:40,305 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:21:40,305 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:21:40,306 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743289271 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743300293 2024-12-09T11:21:40,307 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37551:37551),(127.0.0.1/127.0.0.1:39453:39453)] 2024-12-09T11:21:40,307 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743289271 is not closed yet, will try archiving it next time 2024-12-09T11:21:40,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741833_1009 (size=451) 2024-12-09T11:21:40,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741833_1009 (size=451) 2024-12-09T11:21:40,313 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743289271 to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/oldWALs/2dff3a36d44f%2C45837%2C1733743286499.1733743289271 2024-12-09T11:21:40,316 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36., hostname=2dff3a36d44f,45837,1733743286499, seqNum=2] 2024-12-09T11:21:52,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45837 {}] regionserver.HRegion(8855): Flush requested on cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:21:52,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cd7b5327491d9801c9e0c6519cb49c36 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T11:21:52,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/f244e1f35757439f926d6aa537f17512 is 1080, key is row0001/info:/1733743300319/Put/seqid=0 2024-12-09T11:21:52,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741838_1014 (size=12509) 2024-12-09T11:21:52,441 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741838_1014 (size=12509) 2024-12-09T11:21:52,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/f244e1f35757439f926d6aa537f17512 2024-12-09T11:21:52,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/f244e1f35757439f926d6aa537f17512 as hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/f244e1f35757439f926d6aa537f17512 2024-12-09T11:21:52,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/f244e1f35757439f926d6aa537f17512, entries=7, sequenceid=11, filesize=12.2 K 2024-12-09T11:21:52,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for cd7b5327491d9801c9e0c6519cb49c36 in 165ms, sequenceid=11, compaction requested=false 2024-12-09T11:21:52,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cd7b5327491d9801c9e0c6519cb49c36: 2024-12-09T11:21:54,531 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
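The flush above is the first of several identical ones: seven ~1080-byte cells (row0001..row0007, family info, empty qualifier) add up to ~7.36 KB of data / ~8.13 KB on heap, written to an HFile under .tmp and then committed to info/ at sequenceid=11. A hedged sketch of the client pattern that produces such a flush follows; the row keys, family, and payload size mirror the log, but the explicit Admin.flush() call and the rest of the scaffolding are illustrative assumptions, not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteAndFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    byte[] family = Bytes.toBytes("info");
    byte[] value = new byte[1024];   // ~1 KB payload, comparable to the 1080-byte cells in the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      for (int i = 1; i <= 7; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));   // row0001 .. row0007
        put.addColumn(family, Bytes.toBytes(""), value);                 // empty qualifier, matching "info:" above
        table.put(put);
      }
      // Force the memstore out to an HFile; otherwise the data sits in memory until the flush
      // size is reached or a flush is requested, as logged by MemStoreFlusher.0 above.
      admin.flush(tn);
    }
  }
}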
2024-12-09T11:22:00,363 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C45837%2C1733743286499.1733743320363 2024-12-09T11:22:00,572 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK], DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK]] 2024-12-09T11:22:00,573 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:00,573 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:00,573 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:00,573 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:00,573 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:00,574 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743300293 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743320363 2024-12-09T11:22:00,575 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37551:37551),(127.0.0.1/127.0.0.1:39453:39453)] 2024-12-09T11:22:00,575 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743300293 is not closed yet, will try archiving it next time 2024-12-09T11:22:00,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741837_1013 (size=12399) 2024-12-09T11:22:00,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741837_1013 (size=12399) 2024-12-09T11:22:00,779 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK], DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK]] 2024-12-09T11:22:02,983 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK], DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK]] 2024-12-09T11:22:05,187 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK], DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK]] 2024-12-09T11:22:07,392 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK], DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK]] 2024-12-09T11:22:07,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45837 {}] regionserver.HRegion(8855): Flush requested on cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:22:07,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cd7b5327491d9801c9e0c6519cb49c36 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T11:22:07,595 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK], DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK]] 2024-12-09T11:22:07,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/dbfefe20f4454107a6f57314b280a98b is 1080, key is row0008/info:/1733743314353/Put/seqid=0 2024-12-09T11:22:07,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741840_1016 (size=12509) 2024-12-09T11:22:07,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741840_1016 (size=12509) 2024-12-09T11:22:07,615 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/dbfefe20f4454107a6f57314b280a98b 2024-12-09T11:22:07,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/dbfefe20f4454107a6f57314b280a98b as hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/dbfefe20f4454107a6f57314b280a98b 2024-12-09T11:22:07,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/dbfefe20f4454107a6f57314b280a98b, entries=7, sequenceid=21, filesize=12.2 K 2024-12-09T11:22:07,843 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK], DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK]] 2024-12-09T11:22:07,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for cd7b5327491d9801c9e0c6519cb49c36 in 
450ms, sequenceid=21, compaction requested=false 2024-12-09T11:22:07,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cd7b5327491d9801c9e0c6519cb49c36: 2024-12-09T11:22:07,843 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-09T11:22:07,843 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:22:07,844 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/f244e1f35757439f926d6aa537f17512 because midkey is the same as first or last row 2024-12-09T11:22:09,598 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK], DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK]] 2024-12-09T11:22:10,041 INFO [master/2dff3a36d44f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T11:22:10,041 INFO [master/2dff3a36d44f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T11:22:11,802 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK], DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK]] 2024-12-09T11:22:11,805 WARN [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK], DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK]] 2024-12-09T11:22:11,806 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C45837%2C1733743286499:(num 1733743320363) roll requested 2024-12-09T11:22:11,806 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C45837%2C1733743286499.1733743331806 2024-12-09T11:22:12,015 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK], DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK]] 2024-12-09T11:22:12,015 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:12,015 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:12,015 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:12,015 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:12,016 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
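The 11:22:11 WARN is the count-based roll trigger: each individual sync was only ~200 ms, but once more than five slow syncs had accumulated the WAL asked the log roller for a fresh file ("count=8, threshold=5"), and the roll at 11:22:12 follows. The snippet below is not the actual AbstractFSWAL code, only a self-contained toy restating that counting logic; the class and field names are invented for illustration, and only the count/threshold relationship comes from the log.

/** Toy model of the count-based slow-sync roll trigger; illustrative names, not HBase code. */
public class SlowSyncRollTrigger {
  private final long slowSyncNanos;   // a single sync slower than this counts as "slow"
  private final int rollThreshold;    // how many slow syncs are tolerated before a roll is requested
  private int slowSyncCount;

  public SlowSyncRollTrigger(long slowSyncMs, int rollThreshold) {
    this.slowSyncNanos = slowSyncMs * 1_000_000L;
    this.rollThreshold = rollThreshold;
  }

  /** Call after every WAL sync with its duration; returns true when a log roll should be requested. */
  public boolean onSync(long syncDurationNanos) {
    if (syncDurationNanos > slowSyncNanos) {
      slowSyncCount++;
    }
    if (slowSyncCount > rollThreshold) {
      slowSyncCount = 0;   // the fresh WAL starts counting from zero again
      return true;         // corresponds to "count=8, threshold=5" in the WARN above
    }
    return false;
  }
}

A second, duration-based trigger also appears later in this run, where a single sync of ~5005 ms exceeds a 5000 ms threshold and forces a roll on its own.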
2024-12-09T11:22:12,016 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743320363 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743331806 2024-12-09T11:22:12,017 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39453:39453),(127.0.0.1/127.0.0.1:37551:37551)] 2024-12-09T11:22:12,017 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743320363 is not closed yet, will try archiving it next time 2024-12-09T11:22:12,017 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743300293 to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/oldWALs/2dff3a36d44f%2C45837%2C1733743286499.1733743300293 2024-12-09T11:22:12,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741839_1015 (size=7739) 2024-12-09T11:22:12,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741839_1015 (size=7739) 2024-12-09T11:22:14,007 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:16,093 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region cd7b5327491d9801c9e0c6519cb49c36, had cached 0 bytes from a total of 25018 2024-12-09T11:22:16,214 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:18,418 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:20,624 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], 
DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:22,626 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:22:22,627 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C45837%2C1733743286499.1733743342626 2024-12-09T11:22:24,531 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:22:27,635 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:27,638 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:27,638 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C45837%2C1733743286499:(num 1733743342626) roll requested 2024-12-09T11:22:27,638 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:27,638 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:27,638 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:27,639 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:27,639 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:27,639 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743331806 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743342626 2024-12-09T11:22:27,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741841_1017 (size=4753) 2024-12-09T11:22:27,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741841_1017 (size=4753) 2024-12-09T11:22:27,646 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39453:39453),(127.0.0.1/127.0.0.1:37551:37551)] 2024-12-09T11:22:27,646 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743331806 is not closed yet, will try archiving it next time 2024-12-09T11:22:27,646 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C45837%2C1733743286499.1733743347646 2024-12-09T11:22:32,649 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:32,650 WARN [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:32,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45837 {}] regionserver.HRegion(8855): Flush requested on cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:22:32,650 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cd7b5327491d9801c9e0c6519cb49c36 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T11:22:32,655 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:32,655 WARN [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:34,651 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:22:37,653 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:37,653 WARN [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:37,653 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:37,653 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:37,654 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:37,654 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:37,654 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:37,654 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743342626 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743347646 2024-12-09T11:22:37,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741842_1018 (size=1569) 2024-12-09T11:22:37,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741842_1018 (size=1569) 2024-12-09T11:22:37,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/75123c9972ca452b9edd0e0c73611953 is 1080, key is row0015/info:/1733743329395/Put/seqid=0 2024-12-09T11:22:37,664 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39453:39453),(127.0.0.1/127.0.0.1:37551:37551)] 2024-12-09T11:22:37,664 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C45837%2C1733743286499:(num 1733743347646) roll requested 2024-12-09T11:22:37,664 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C45837%2C1733743286499.1733743357664 2024-12-09T11:22:37,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741844_1020 (size=12509) 2024-12-09T11:22:37,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741844_1020 (size=12509) 2024-12-09T11:22:37,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/75123c9972ca452b9edd0e0c73611953 2024-12-09T11:22:37,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/75123c9972ca452b9edd0e0c73611953 as hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/75123c9972ca452b9edd0e0c73611953 2024-12-09T11:22:37,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/75123c9972ca452b9edd0e0c73611953, entries=7, sequenceid=31, filesize=12.2 K 2024-12-09T11:22:42,672 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:42,672 WARN [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:42,689 INFO [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:42,690 WARN [FSHLog-0-hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561-prefix:2dff3a36d44f,45837,1733743286499 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42561,DS-9a3982fe-c647-4fbc-b4b1-edec615dfec6,DISK], DatanodeInfoWithStorage[127.0.0.1:38779,DS-aa20f297-7ffd-406c-a5a9-b0289e9f4195,DISK]] 2024-12-09T11:22:42,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for cd7b5327491d9801c9e0c6519cb49c36 in 10039ms, sequenceid=31, compaction requested=true 2024-12-09T11:22:42,690 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cd7b5327491d9801c9e0c6519cb49c36: 2024-12-09T11:22:42,690 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,690 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,690 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-09T11:22:42,690 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:22:42,690 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,690 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/f244e1f35757439f926d6aa537f17512 because midkey is the same as first or last row 2024-12-09T11:22:42,690 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,691 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743347646 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743357664 2024-12-09T11:22:42,692 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37551:37551),(127.0.0.1/127.0.0.1:39453:39453)] 2024-12-09T11:22:42,692 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743347646 is not closed yet, will try archiving it next time 2024-12-09T11:22:42,692 INFO 
[WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743320363 to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/oldWALs/2dff3a36d44f%2C45837%2C1733743286499.1733743320363 2024-12-09T11:22:42,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd7b5327491d9801c9e0c6519cb49c36:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T11:22:42,692 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C45837%2C1733743286499:(num 1733743362692) roll requested 2024-12-09T11:22:42,692 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C45837%2C1733743286499.1733743362692 2024-12-09T11:22:42,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741843_1019 (size=438) 2024-12-09T11:22:42,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741843_1019 (size=438) 2024-12-09T11:22:42,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:22:42,695 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743331806 to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/oldWALs/2dff3a36d44f%2C45837%2C1733743286499.1733743331806 2024-12-09T11:22:42,695 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T11:22:42,697 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743342626 to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/oldWALs/2dff3a36d44f%2C45837%2C1733743286499.1733743342626 2024-12-09T11:22:42,698 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T11:22:42,700 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.HStore(1541): cd7b5327491d9801c9e0c6519cb49c36/info is initiating minor compaction (all files) 2024-12-09T11:22:42,700 INFO [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of cd7b5327491d9801c9e0c6519cb49c36/info in TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 
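By 11:22:42 three ~12.2 K flush files exist for cd7b5327491d9801c9e0c6519cb49c36/info, so the flusher queues a compaction and the policy selects all three (37527 bytes) for a minor compaction, while the roller archives the now-obsolete WALs to oldWALs. Here the compaction is triggered automatically, but the same work can be requested explicitly through the Admin API; the sketch below is an assumed, simplified usage (the table name comes from the log, the polling loop and sleep interval are invented for the example).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(tn);   // ask the region server to compact the table's stores (minor compaction)
      // Compactions run asynchronously on the region server; poll until none is pending or running.
      while (admin.getCompactionState(tn) != CompactionState.NONE) {
        Thread.sleep(500);
      }
    }
  }
}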
2024-12-09T11:22:42,701 INFO [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/f244e1f35757439f926d6aa537f17512, hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/dbfefe20f4454107a6f57314b280a98b, hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/75123c9972ca452b9edd0e0c73611953] into tmpdir=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp, totalSize=36.6 K 2024-12-09T11:22:42,702 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,702 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] compactions.Compactor(225): Compacting f244e1f35757439f926d6aa537f17512, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733743300319 2024-12-09T11:22:42,702 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,702 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,702 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,703 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,703 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743357664 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743362692 2024-12-09T11:22:42,703 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] compactions.Compactor(225): Compacting dbfefe20f4454107a6f57314b280a98b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733743314353 2024-12-09T11:22:42,704 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] compactions.Compactor(225): Compacting 75123c9972ca452b9edd0e0c73611953, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733743329395 2024-12-09T11:22:42,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741845_1021 (size=93) 2024-12-09T11:22:42,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741845_1021 (size=93) 2024-12-09T11:22:42,706 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743347646 is not closed yet, will try archiving it next time 2024-12-09T11:22:42,706 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743357664 to 
hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/oldWALs/2dff3a36d44f%2C45837%2C1733743286499.1733743357664 2024-12-09T11:22:42,706 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37551:37551),(127.0.0.1/127.0.0.1:39453:39453)] 2024-12-09T11:22:42,706 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743347646 is not closed yet, will try archiving it next time 2024-12-09T11:22:42,707 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C45837%2C1733743286499.1733743362707 2024-12-09T11:22:42,714 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,714 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,714 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,714 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,715 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:22:42,715 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743362692 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743362707 2024-12-09T11:22:42,716 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39453:39453),(127.0.0.1/127.0.0.1:37551:37551)] 2024-12-09T11:22:42,716 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743347646 is not closed yet, will try archiving it next time 2024-12-09T11:22:42,716 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743362692 is not closed yet, will try archiving it next time 2024-12-09T11:22:42,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741846_1022 (size=1258) 2024-12-09T11:22:42,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741846_1022 (size=1258) 2024-12-09T11:22:42,718 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743347646 is not closed yet, will try archiving it next time 2024-12-09T11:22:42,738 INFO [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd7b5327491d9801c9e0c6519cb49c36#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:22:42,739 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/7bff5bab21c343f0a3152135e54b819e is 1080, key is row0001/info:/1733743300319/Put/seqid=0 2024-12-09T11:22:42,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741848_1024 (size=27710) 2024-12-09T11:22:42,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741848_1024 (size=27710) 2024-12-09T11:22:42,755 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/7bff5bab21c343f0a3152135e54b819e as hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/7bff5bab21c343f0a3152135e54b819e 2024-12-09T11:22:42,774 INFO [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in cd7b5327491d9801c9e0c6519cb49c36/info of cd7b5327491d9801c9e0c6519cb49c36 into 7bff5bab21c343f0a3152135e54b819e(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T11:22:42,774 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for cd7b5327491d9801c9e0c6519cb49c36: 2024-12-09T11:22:42,777 INFO [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36., storeName=cd7b5327491d9801c9e0c6519cb49c36/info, priority=13, startTime=1733743362692; duration=0sec 2024-12-09T11:22:42,777 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T11:22:42,777 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:22:42,777 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/7bff5bab21c343f0a3152135e54b819e because midkey is the same as first or last row 2024-12-09T11:22:42,777 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T11:22:42,777 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:22:42,778 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/7bff5bab21c343f0a3152135e54b819e because midkey is the same as first or last row 2024-12-09T11:22:42,778 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T11:22:42,778 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:22:42,778 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/7bff5bab21c343f0a3152135e54b819e because midkey is the same as first or last row 2024-12-09T11:22:42,778 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:22:42,778 DEBUG [RS:0;2dff3a36d44f:45837-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd7b5327491d9801c9e0c6519cb49c36:info 2024-12-09T11:22:43,095 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/WALs/2dff3a36d44f,45837,1733743286499/2dff3a36d44f%2C45837%2C1733743286499.1733743347646 to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/oldWALs/2dff3a36d44f%2C45837%2C1733743286499.1733743347646 2024-12-09T11:22:54,532 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
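The DEBUG triplet repeated after the compaction is the split decision: the store (27.1 K) exceeds the 16.0 K check size, so the size test passes, but the split is abandoned because the proposed split point, the midkey of the store file, equals the file's first or last row, and a split there would leave one daughter effectively empty. Below is a toy restatement of that two-part check; it is deliberately simplified (plain byte arrays, invented method names, assumed row keys) and is not the ConstantSizeRegionSplitPolicy or StoreUtils code itself.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

/** Toy restatement of the size and split-point checks logged above; not HBase's split policy code. */
public class SplitCheck {

  /** Size test: worth splitting only when the summed store file size exceeds the check size. */
  static boolean bigEnough(long sumStoreSizeBytes, long sizeToCheckBytes) {
    return sumStoreSizeBytes > sizeToCheckBytes;   // 27.1 K > 16.0 K in the entries above
  }

  /** Split-point test: the midkey must differ from both the first and the last row of the file. */
  static boolean usableSplitPoint(byte[] midKey, byte[] firstKey, byte[] lastKey) {
    return !Arrays.equals(midKey, firstKey) && !Arrays.equals(midKey, lastKey);
  }

  public static void main(String[] args) {
    long sumSize = 27710;        // ~27.1 K, as logged for 7bff5bab21c343f0a3152135e54b819e
    long sizeToCheck = 16384;    // 16.0 K, as logged
    // For a file this small the reported midkey can coincide with its first (or last) row, which is
    // the "cannot split ... because midkey is the same as first or last row" case in the log.
    byte[] first = "row0001".getBytes(StandardCharsets.UTF_8);
    byte[] mid   = "row0001".getBytes(StandardCharsets.UTF_8);
    byte[] last  = "row0021".getBytes(StandardCharsets.UTF_8);
    System.out.println("should split: "
        + (bigEnough(sumSize, sizeToCheck) && usableSplitPoint(mid, first, last)));   // prints false
  }
}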
2024-12-09T11:22:54,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45837 {}] regionserver.HRegion(8855): Flush requested on cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:22:54,733 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing cd7b5327491d9801c9e0c6519cb49c36 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T11:22:54,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/97a5f81df92b4cfc8d7f3d7295924cf1 is 1080, key is row0022/info:/1733743362708/Put/seqid=0 2024-12-09T11:22:54,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741849_1025 (size=12509) 2024-12-09T11:22:54,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741849_1025 (size=12509) 2024-12-09T11:22:54,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/97a5f81df92b4cfc8d7f3d7295924cf1 2024-12-09T11:22:54,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/97a5f81df92b4cfc8d7f3d7295924cf1 as hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/97a5f81df92b4cfc8d7f3d7295924cf1 2024-12-09T11:22:54,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/97a5f81df92b4cfc8d7f3d7295924cf1, entries=7, sequenceid=42, filesize=12.2 K 2024-12-09T11:22:54,791 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for cd7b5327491d9801c9e0c6519cb49c36 in 58ms, sequenceid=42, compaction requested=false 2024-12-09T11:22:54,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for cd7b5327491d9801c9e0c6519cb49c36: 2024-12-09T11:22:54,792 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-09T11:22:54,792 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:22:54,792 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/7bff5bab21c343f0a3152135e54b819e because midkey is the same as first or last row 2024-12-09T11:23:01,093 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region cd7b5327491d9801c9e0c6519cb49c36, had 
cached 0 bytes from a total of 40219 2024-12-09T11:23:02,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T11:23:02,760 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T11:23:02,761 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:23:02,766 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:02,767 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-12-09T11:23:02,767 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:23:02,767 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T11:23:02,767 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=994033306, stopped=false 2024-12-09T11:23:02,768 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2dff3a36d44f,35265,1733743285620 2024-12-09T11:23:02,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:02,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:02,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:02,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:02,771 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:23:02,771 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
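Everything from 11:23:02 onward is the JUnit tearDown unwinding the single-node cluster: the call stacks show AbstractTestLogRolling.tearDown() invoking HBaseTestingUtil.shutdownMiniCluster(), which closes the async connection, deletes /hbase/running in ZooKeeper, and asks the master and region server to stop. The surrounding test scaffolding follows the usual mini-cluster pattern; the outline below is a hedged reconstruction in which only the HBaseTestingUtil method names are taken from the stack traces, and the test body is a placeholder.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster();    // in-process ZooKeeper, HDFS, one master and one region server
  }

  @Test
  public void testAgainstTheMiniCluster() throws Exception {
    // ... create the table, write rows, roll WALs, make assertions ...
  }

  @After
  public void tearDown() throws Exception {
    util.shutdownMiniCluster(); // emits the "Shutting down minicluster" / ZooKeeper NodeDeleted entries above
  }
}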
2024-12-09T11:23:02,771 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:02,771 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:02,771 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:23:02,771 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:02,772 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2dff3a36d44f,45837,1733743286499' ***** 2024-12-09T11:23:02,772 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:23:02,773 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:23:02,773 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:23:02,773 INFO [RS:0;2dff3a36d44f:45837 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:23:02,773 INFO [RS:0;2dff3a36d44f:45837 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:23:02,773 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(3091): Received CLOSE for cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:23:02,774 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(959): stopping server 2dff3a36d44f,45837,1733743286499 2024-12-09T11:23:02,774 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:23:02,774 INFO [RS:0;2dff3a36d44f:45837 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2dff3a36d44f:45837. 
2024-12-09T11:23:02,774 DEBUG [RS:0;2dff3a36d44f:45837 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:23:02,774 DEBUG [RS:0;2dff3a36d44f:45837 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:02,774 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cd7b5327491d9801c9e0c6519cb49c36, disabling compactions & flushes 2024-12-09T11:23:02,774 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:23:02,774 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 2024-12-09T11:23:02,774 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:23:02,774 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T11:23:02,774 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 2024-12-09T11:23:02,775 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. after waiting 0 ms 2024-12-09T11:23:02,775 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T11:23:02,775 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 
2024-12-09T11:23:02,775 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing cd7b5327491d9801c9e0c6519cb49c36 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-09T11:23:02,775 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T11:23:02,775 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:23:02,775 DEBUG [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, cd7b5327491d9801c9e0c6519cb49c36=TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.} 2024-12-09T11:23:02,775 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:23:02,775 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:23:02,775 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:23:02,775 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:23:02,776 DEBUG [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:23:02,776 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-09T11:23:02,782 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/70e51e7043564a8f87ec75e5ef565c99 is 1080, key is row0029/info:/1733743376746/Put/seqid=0 2024-12-09T11:23:02,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741850_1026 (size=8193) 2024-12-09T11:23:02,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741850_1026 (size=8193) 2024-12-09T11:23:02,805 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/.tmp/info/f58c95550f4749f49409b596bb7ce673 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36./info:regioninfo/1733743291146/Put/seqid=0 2024-12-09T11:23:02,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741851_1027 (size=7016) 2024-12-09T11:23:02,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741851_1027 (size=7016) 2024-12-09T11:23:02,812 INFO 
[RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/.tmp/info/f58c95550f4749f49409b596bb7ce673 2024-12-09T11:23:02,838 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/.tmp/ns/2747899d2b3c45a68c1417cbecfd34c6 is 43, key is default/ns:d/1733743289907/Put/seqid=0 2024-12-09T11:23:02,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741852_1028 (size=5153) 2024-12-09T11:23:02,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741852_1028 (size=5153) 2024-12-09T11:23:02,850 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/.tmp/ns/2747899d2b3c45a68c1417cbecfd34c6 2024-12-09T11:23:02,876 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/.tmp/table/457416555d0740e881919828f36ae1ad is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733743291172/Put/seqid=0 2024-12-09T11:23:02,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741853_1029 (size=5396) 2024-12-09T11:23:02,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741853_1029 (size=5396) 2024-12-09T11:23:02,886 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/.tmp/table/457416555d0740e881919828f36ae1ad 2024-12-09T11:23:02,896 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/.tmp/info/f58c95550f4749f49409b596bb7ce673 as hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/info/f58c95550f4749f49409b596bb7ce673 2024-12-09T11:23:02,905 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/info/f58c95550f4749f49409b596bb7ce673, entries=10, sequenceid=11, filesize=6.9 K 2024-12-09T11:23:02,906 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/.tmp/ns/2747899d2b3c45a68c1417cbecfd34c6 as hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/ns/2747899d2b3c45a68c1417cbecfd34c6 2024-12-09T11:23:02,915 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/ns/2747899d2b3c45a68c1417cbecfd34c6, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T11:23:02,917 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/.tmp/table/457416555d0740e881919828f36ae1ad as hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/table/457416555d0740e881919828f36ae1ad 2024-12-09T11:23:02,926 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/table/457416555d0740e881919828f36ae1ad, entries=2, sequenceid=11, filesize=5.3 K 2024-12-09T11:23:02,927 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 152ms, sequenceid=11, compaction requested=false 2024-12-09T11:23:02,951 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T11:23:02,954 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:23:02,955 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:23:02,955 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743382775Running coprocessor pre-close hooks at 1733743382775Disabling compacts and flushes for region at 1733743382775Disabling writes for close at 1733743382775Obtaining lock to block concurrent updates at 1733743382776 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733743382776Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733743382776Flushing stores of hbase:meta,,1.1588230740 at 1733743382777 (+1 ms)Flushing 1588230740/info: creating writer at 1733743382777Flushing 1588230740/info: appending metadata at 1733743382804 (+27 ms)Flushing 1588230740/info: closing flushed file at 1733743382804Flushing 1588230740/ns: creating writer at 1733743382821 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733743382837 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733743382838 (+1 
ms)Flushing 1588230740/table: creating writer at 1733743382858 (+20 ms)Flushing 1588230740/table: appending metadata at 1733743382875 (+17 ms)Flushing 1588230740/table: closing flushed file at 1733743382875Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5077ac73: reopening flushed file at 1733743382894 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4847a166: reopening flushed file at 1733743382905 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cc5c40a: reopening flushed file at 1733743382916 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 152ms, sequenceid=11, compaction requested=false at 1733743382928 (+12 ms)Writing region close event to WAL at 1733743382935 (+7 ms)Running coprocessor post-close hooks at 1733743382952 (+17 ms)Closed at 1733743382954 (+2 ms) 2024-12-09T11:23:02,955 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T11:23:02,976 DEBUG [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(1351): Waiting on cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:23:03,079 INFO [regionserver/2dff3a36d44f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T11:23:03,079 INFO [regionserver/2dff3a36d44f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T11:23:03,100 INFO [regionserver/2dff3a36d44f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:23:03,176 DEBUG [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(1351): Waiting on cd7b5327491d9801c9e0c6519cb49c36 2024-12-09T11:23:03,194 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/70e51e7043564a8f87ec75e5ef565c99 2024-12-09T11:23:03,202 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/.tmp/info/70e51e7043564a8f87ec75e5ef565c99 as hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/70e51e7043564a8f87ec75e5ef565c99 2024-12-09T11:23:03,210 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/70e51e7043564a8f87ec75e5ef565c99, entries=3, sequenceid=48, filesize=8.0 K 2024-12-09T11:23:03,211 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for cd7b5327491d9801c9e0c6519cb49c36 in 436ms, sequenceid=48, compaction requested=true 2024-12-09T11:23:03,212 DEBUG 
[StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/f244e1f35757439f926d6aa537f17512, hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/dbfefe20f4454107a6f57314b280a98b, hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/75123c9972ca452b9edd0e0c73611953] to archive 2024-12-09T11:23:03,215 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T11:23:03,217 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/f244e1f35757439f926d6aa537f17512 to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/archive/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/f244e1f35757439f926d6aa537f17512 2024-12-09T11:23:03,219 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/dbfefe20f4454107a6f57314b280a98b to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/archive/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/dbfefe20f4454107a6f57314b280a98b 2024-12-09T11:23:03,221 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/75123c9972ca452b9edd0e0c73611953 to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/archive/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/info/75123c9972ca452b9edd0e0c73611953 2024-12-09T11:23:03,232 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=2dff3a36d44f:35265 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-09T11:23:03,233 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [f244e1f35757439f926d6aa537f17512=12509, dbfefe20f4454107a6f57314b280a98b=12509, 75123c9972ca452b9edd0e0c73611953=12509] 2024-12-09T11:23:03,239 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/data/default/TestLogRolling-testSlowSyncLogRolling/cd7b5327491d9801c9e0c6519cb49c36/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-09T11:23:03,240 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 2024-12-09T11:23:03,240 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cd7b5327491d9801c9e0c6519cb49c36: Waiting for close lock at 1733743382774Running coprocessor pre-close hooks at 1733743382774Disabling compacts and flushes for region at 1733743382774Disabling writes for close at 1733743382775 (+1 ms)Obtaining lock to block concurrent updates at 1733743382775Preparing flush snapshotting stores in cd7b5327491d9801c9e0c6519cb49c36 at 1733743382775Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733743382775Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. at 1733743382777 (+2 ms)Flushing cd7b5327491d9801c9e0c6519cb49c36/info: creating writer at 1733743382777Flushing cd7b5327491d9801c9e0c6519cb49c36/info: appending metadata at 1733743382781 (+4 ms)Flushing cd7b5327491d9801c9e0c6519cb49c36/info: closing flushed file at 1733743382781Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@174b7fed: reopening flushed file at 1733743383201 (+420 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for cd7b5327491d9801c9e0c6519cb49c36 in 436ms, sequenceid=48, compaction requested=true at 1733743383211 (+10 ms)Writing region close event to WAL at 1733743383234 (+23 ms)Running coprocessor post-close hooks at 1733743383240 (+6 ms)Closed at 1733743383240 2024-12-09T11:23:03,241 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733743290185.cd7b5327491d9801c9e0c6519cb49c36. 2024-12-09T11:23:03,376 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(976): stopping server 2dff3a36d44f,45837,1733743286499; all regions closed. 
2024-12-09T11:23:03,378 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,379 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,379 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,379 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,380 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741834_1010 (size=3066) 2024-12-09T11:23:03,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741834_1010 (size=3066) 2024-12-09T11:23:03,788 DEBUG [RS:0;2dff3a36d44f:45837 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/oldWALs 2024-12-09T11:23:03,788 INFO [RS:0;2dff3a36d44f:45837 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C45837%2C1733743286499.meta:.meta(num 1733743289686) 2024-12-09T11:23:03,788 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,789 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,789 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,789 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,789 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741847_1023 (size=12695) 2024-12-09T11:23:03,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741847_1023 (size=12695) 2024-12-09T11:23:03,796 DEBUG [RS:0;2dff3a36d44f:45837 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/oldWALs 2024-12-09T11:23:03,796 INFO [RS:0;2dff3a36d44f:45837 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C45837%2C1733743286499:(num 1733743362707) 2024-12-09T11:23:03,796 DEBUG [RS:0;2dff3a36d44f:45837 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:03,796 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:23:03,797 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:23:03,797 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.ChoreService(370): Chore service for: regionserver/2dff3a36d44f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T11:23:03,797 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:23:03,797 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T11:23:03,797 INFO [RS:0;2dff3a36d44f:45837 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45837 2024-12-09T11:23:03,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2dff3a36d44f,45837,1733743286499 2024-12-09T11:23:03,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:23:03,802 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:23:03,804 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2dff3a36d44f,45837,1733743286499] 2024-12-09T11:23:03,807 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2dff3a36d44f,45837,1733743286499 already deleted, retry=false 2024-12-09T11:23:03,807 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2dff3a36d44f,45837,1733743286499 expired; onlineServers=0 2024-12-09T11:23:03,807 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2dff3a36d44f,35265,1733743285620' ***** 2024-12-09T11:23:03,807 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T11:23:03,807 INFO [M:0;2dff3a36d44f:35265 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:23:03,808 INFO [M:0;2dff3a36d44f:35265 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:23:03,808 DEBUG [M:0;2dff3a36d44f:35265 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T11:23:03,808 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T11:23:03,808 DEBUG [M:0;2dff3a36d44f:35265 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T11:23:03,808 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743288843 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743288843,5,FailOnTimeoutGroup] 2024-12-09T11:23:03,808 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743288863 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743288863,5,FailOnTimeoutGroup] 2024-12-09T11:23:03,808 INFO [M:0;2dff3a36d44f:35265 {}] hbase.ChoreService(370): Chore service for: master/2dff3a36d44f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T11:23:03,808 INFO [M:0;2dff3a36d44f:35265 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:23:03,808 DEBUG [M:0;2dff3a36d44f:35265 {}] master.HMaster(1795): Stopping service threads 2024-12-09T11:23:03,808 INFO [M:0;2dff3a36d44f:35265 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T11:23:03,809 INFO [M:0;2dff3a36d44f:35265 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:23:03,809 INFO [M:0;2dff3a36d44f:35265 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T11:23:03,809 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T11:23:03,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T11:23:03,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:03,810 DEBUG [M:0;2dff3a36d44f:35265 {}] zookeeper.ZKUtil(347): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T11:23:03,810 WARN [M:0;2dff3a36d44f:35265 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T11:23:03,810 INFO [M:0;2dff3a36d44f:35265 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/.lastflushedseqids 2024-12-09T11:23:03,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741854_1030 (size=130) 2024-12-09T11:23:03,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741854_1030 (size=130) 2024-12-09T11:23:03,823 INFO [M:0;2dff3a36d44f:35265 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T11:23:03,823 INFO [M:0;2dff3a36d44f:35265 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T11:23:03,824 DEBUG [M:0;2dff3a36d44f:35265 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:23:03,824 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:03,824 DEBUG [M:0;2dff3a36d44f:35265 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:03,824 DEBUG [M:0;2dff3a36d44f:35265 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:23:03,824 DEBUG [M:0;2dff3a36d44f:35265 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:03,824 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.01 KB heapSize=29.18 KB 2024-12-09T11:23:03,841 DEBUG [M:0;2dff3a36d44f:35265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/838870ca8fb3485a9c2a275c5f045abd is 82, key is hbase:meta,,1/info:regioninfo/1733743289814/Put/seqid=0 2024-12-09T11:23:03,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741855_1031 (size=5672) 2024-12-09T11:23:03,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741855_1031 (size=5672) 2024-12-09T11:23:03,848 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/838870ca8fb3485a9c2a275c5f045abd 2024-12-09T11:23:03,871 DEBUG [M:0;2dff3a36d44f:35265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/10474eaa91d44c07be38d00b5a92af7a is 765, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733743291189/Put/seqid=0 2024-12-09T11:23:03,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741856_1032 (size=6246) 2024-12-09T11:23:03,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741856_1032 (size=6246) 2024-12-09T11:23:03,877 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.41 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/10474eaa91d44c07be38d00b5a92af7a 2024-12-09T11:23:03,883 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 10474eaa91d44c07be38d00b5a92af7a 2024-12-09T11:23:03,900 DEBUG [M:0;2dff3a36d44f:35265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/98ad1e8c45624571a704136f04241da2 is 69, key is 2dff3a36d44f,45837,1733743286499/rs:state/1733743288997/Put/seqid=0 2024-12-09T11:23:03,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:03,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45837-0x1012aeaa1ad0001, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:03,905 INFO [RS:0;2dff3a36d44f:45837 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:23:03,905 INFO [RS:0;2dff3a36d44f:45837 {}] regionserver.HRegionServer(1031): Exiting; stopping=2dff3a36d44f,45837,1733743286499; zookeeper connection closed. 2024-12-09T11:23:03,906 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@32c63370 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@32c63370 2024-12-09T11:23:03,906 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T11:23:03,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741857_1033 (size=5156) 2024-12-09T11:23:03,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741857_1033 (size=5156) 2024-12-09T11:23:03,908 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/98ad1e8c45624571a704136f04241da2 2024-12-09T11:23:03,935 DEBUG [M:0;2dff3a36d44f:35265 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/76cbfedcaba042e9b146f436019b0b34 is 52, key is load_balancer_on/state:d/1733743290158/Put/seqid=0 2024-12-09T11:23:03,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741858_1034 (size=5056) 2024-12-09T11:23:03,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741858_1034 (size=5056) 2024-12-09T11:23:03,944 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/76cbfedcaba042e9b146f436019b0b34 2024-12-09T11:23:03,954 DEBUG [M:0;2dff3a36d44f:35265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/838870ca8fb3485a9c2a275c5f045abd as 
hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/838870ca8fb3485a9c2a275c5f045abd 2024-12-09T11:23:03,961 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/838870ca8fb3485a9c2a275c5f045abd, entries=8, sequenceid=59, filesize=5.5 K 2024-12-09T11:23:03,963 DEBUG [M:0;2dff3a36d44f:35265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/10474eaa91d44c07be38d00b5a92af7a as hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/10474eaa91d44c07be38d00b5a92af7a 2024-12-09T11:23:03,970 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 10474eaa91d44c07be38d00b5a92af7a 2024-12-09T11:23:03,970 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/10474eaa91d44c07be38d00b5a92af7a, entries=6, sequenceid=59, filesize=6.1 K 2024-12-09T11:23:03,971 DEBUG [M:0;2dff3a36d44f:35265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/98ad1e8c45624571a704136f04241da2 as hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/98ad1e8c45624571a704136f04241da2 2024-12-09T11:23:03,977 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/98ad1e8c45624571a704136f04241da2, entries=1, sequenceid=59, filesize=5.0 K 2024-12-09T11:23:03,978 DEBUG [M:0;2dff3a36d44f:35265 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/76cbfedcaba042e9b146f436019b0b34 as hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/76cbfedcaba042e9b146f436019b0b34 2024-12-09T11:23:03,985 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/76cbfedcaba042e9b146f436019b0b34, entries=1, sequenceid=59, filesize=4.9 K 2024-12-09T11:23:03,987 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 163ms, sequenceid=59, compaction requested=false 2024-12-09T11:23:03,989 INFO [M:0;2dff3a36d44f:35265 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T11:23:03,989 DEBUG [M:0;2dff3a36d44f:35265 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743383823Disabling compacts and flushes for region at 1733743383823Disabling writes for close at 1733743383824 (+1 ms)Obtaining lock to block concurrent updates at 1733743383824Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733743383824Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23564, getHeapSize=29816, getOffHeapSize=0, getCellsCount=70 at 1733743383824Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733743383825 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733743383825Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733743383841 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733743383841Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733743383855 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733743383870 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733743383870Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733743383884 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733743383900 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733743383900Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733743383915 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733743383934 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733743383934Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@765b1be5: reopening flushed file at 1733743383952 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6fcc599f: reopening flushed file at 1733743383962 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7471c1c4: reopening flushed file at 1733743383970 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12b1b725: reopening flushed file at 1733743383977 (+7 ms)Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 163ms, sequenceid=59, compaction requested=false at 1733743383987 (+10 ms)Writing region close event to WAL at 1733743383989 (+2 ms)Closed at 1733743383989 2024-12-09T11:23:03,990 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,991 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,991 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,991 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,991 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:03,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42561 is added to blk_1073741830_1006 (size=27961) 2024-12-09T11:23:03,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38779 is added to blk_1073741830_1006 (size=27961) 2024-12-09T11:23:03,998 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T11:23:03,999 INFO [M:0;2dff3a36d44f:35265 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T11:23:03,999 INFO [M:0;2dff3a36d44f:35265 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35265 2024-12-09T11:23:03,999 INFO [M:0;2dff3a36d44f:35265 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:23:04,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:04,102 INFO [M:0;2dff3a36d44f:35265 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:23:04,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35265-0x1012aeaa1ad0000, quorum=127.0.0.1:61679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:04,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78be0d39{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:04,109 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@617aa169{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:04,109 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:04,109 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e06ea5e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:04,109 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1612a852{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:04,113 WARN [BP-1173096048-172.17.0.3-1733743281430 heartbeating to localhost/127.0.0.1:38079 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:23:04,113 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:23:04,113 WARN [BP-1173096048-172.17.0.3-1733743281430 heartbeating to localhost/127.0.0.1:38079 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1173096048-172.17.0.3-1733743281430 (Datanode Uuid 80241328-4d93-4f1d-b318-dc9112f4d4bb) service to localhost/127.0.0.1:38079 2024-12-09T11:23:04,113 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:23:04,114 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/cluster_3603d4f6-449d-a5b3-3a0a-a0af155f78f5/data/data3/current/BP-1173096048-172.17.0.3-1733743281430 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:04,114 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/cluster_3603d4f6-449d-a5b3-3a0a-a0af155f78f5/data/data4/current/BP-1173096048-172.17.0.3-1733743281430 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:04,115 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:23:04,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@32c41a8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:04,118 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21c64e78{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:04,118 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:04,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@198fe7a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:04,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@616d254c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:04,120 WARN [BP-1173096048-172.17.0.3-1733743281430 heartbeating to localhost/127.0.0.1:38079 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:23:04,120 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:23:04,120 WARN [BP-1173096048-172.17.0.3-1733743281430 heartbeating to localhost/127.0.0.1:38079 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1173096048-172.17.0.3-1733743281430 (Datanode Uuid ffc48d17-9d93-4924-a708-4450516f13ee) service to localhost/127.0.0.1:38079 2024-12-09T11:23:04,120 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:23:04,120 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/cluster_3603d4f6-449d-a5b3-3a0a-a0af155f78f5/data/data1/current/BP-1173096048-172.17.0.3-1733743281430 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:04,121 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/cluster_3603d4f6-449d-a5b3-3a0a-a0af155f78f5/data/data2/current/BP-1173096048-172.17.0.3-1733743281430 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:04,121 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:23:04,131 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f961078{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:23:04,132 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25dfddc5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:04,132 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:04,132 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@455f3457{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:04,132 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75bdea07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:04,144 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T11:23:04,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T11:23:04,202 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@51a0b850 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/2dff3a36d44f:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:38079 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38079 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: master/2dff3a36d44f:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38079 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38079 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38079 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:38079 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:38079 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: regionserver/2dff3a36d44f:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:38079 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=486 (was 472) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2015 (was 3057) 2024-12-09T11:23:04,216 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=486, ProcessCount=11, AvailableMemoryMB=2014 2024-12-09T11:23:04,217 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T11:23:04,217 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/hadoop.log.dir so I do NOT create it in target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb 2024-12-09T11:23:04,217 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcaa2b05-bf9a-c03a-2666-e19ee4c9c005/hadoop.tmp.dir so I do NOT create it in target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb 2024-12-09T11:23:04,217 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/cluster_56c7653f-d6f8-1f7c-6220-64a27aa2382b, deleteOnExit=true 2024-12-09T11:23:04,217 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T11:23:04,217 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/test.cache.data in system properties and HBase conf 2024-12-09T11:23:04,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T11:23:04,218 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/hadoop.log.dir in system properties and HBase conf 2024-12-09T11:23:04,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T11:23:04,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T11:23:04,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T11:23:04,218 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T11:23:04,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:23:04,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:23:04,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T11:23:04,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:23:04,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T11:23:04,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T11:23:04,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:23:04,219 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:23:04,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T11:23:04,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/nfs.dump.dir in system properties and HBase conf 2024-12-09T11:23:04,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/java.io.tmpdir in system properties and HBase conf 2024-12-09T11:23:04,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:23:04,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T11:23:04,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T11:23:04,233 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:23:04,304 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:04,311 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:04,312 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:04,312 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:04,312 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:23:04,313 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:04,313 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@246cb388{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:04,314 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73737af2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:04,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5316f6d0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/java.io.tmpdir/jetty-localhost-32875-hadoop-hdfs-3_4_1-tests_jar-_-any-5547008310411951034/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:23:04,448 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ced705f{HTTP/1.1, (http/1.1)}{localhost:32875} 2024-12-09T11:23:04,448 INFO [Time-limited test {}] server.Server(415): Started @105721ms 2024-12-09T11:23:04,466 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:23:04,558 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:04,563 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:04,564 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:04,564 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:04,564 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:23:04,565 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ca07ff3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:04,565 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22ba3420{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:04,695 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a787e38{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/java.io.tmpdir/jetty-localhost-38941-hadoop-hdfs-3_4_1-tests_jar-_-any-2227731740362830384/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:04,696 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@144bd696{HTTP/1.1, (http/1.1)}{localhost:38941} 2024-12-09T11:23:04,696 INFO [Time-limited test {}] server.Server(415): Started @105969ms 2024-12-09T11:23:04,698 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:23:04,741 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:04,745 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:04,745 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:04,745 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:04,746 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:23:04,746 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16c9fe9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:04,746 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2502c8c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:04,822 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/cluster_56c7653f-d6f8-1f7c-6220-64a27aa2382b/data/data1/current/BP-791067690-172.17.0.3-1733743384252/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:04,822 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/cluster_56c7653f-d6f8-1f7c-6220-64a27aa2382b/data/data2/current/BP-791067690-172.17.0.3-1733743384252/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:04,853 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:23:04,856 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x52cdcc12a20e85b8 with lease ID 0xce13bfaf6b2c89fa: Processing first storage report for DS-3d937cb1-dd40-4da7-8d45-a96baef3997c from datanode DatanodeRegistration(127.0.0.1:45679, datanodeUuid=10fcfe74-71d9-4d09-80a2-d0b0605794af, infoPort=44937, infoSecurePort=0, ipcPort=35995, storageInfo=lv=-57;cid=testClusterID;nsid=1810252091;c=1733743384252) 2024-12-09T11:23:04,857 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x52cdcc12a20e85b8 with lease ID 0xce13bfaf6b2c89fa: from storage DS-3d937cb1-dd40-4da7-8d45-a96baef3997c node DatanodeRegistration(127.0.0.1:45679, datanodeUuid=10fcfe74-71d9-4d09-80a2-d0b0605794af, infoPort=44937, infoSecurePort=0, ipcPort=35995, storageInfo=lv=-57;cid=testClusterID;nsid=1810252091;c=1733743384252), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T11:23:04,857 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x52cdcc12a20e85b8 with lease ID 0xce13bfaf6b2c89fa: Processing first storage report for DS-0d0a7843-9602-4678-96db-843da88c022f from datanode DatanodeRegistration(127.0.0.1:45679, datanodeUuid=10fcfe74-71d9-4d09-80a2-d0b0605794af, infoPort=44937, infoSecurePort=0, ipcPort=35995, storageInfo=lv=-57;cid=testClusterID;nsid=1810252091;c=1733743384252) 2024-12-09T11:23:04,857 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x52cdcc12a20e85b8 with lease ID 0xce13bfaf6b2c89fa: from storage DS-0d0a7843-9602-4678-96db-843da88c022f node DatanodeRegistration(127.0.0.1:45679, datanodeUuid=10fcfe74-71d9-4d09-80a2-d0b0605794af, infoPort=44937, infoSecurePort=0, ipcPort=35995, storageInfo=lv=-57;cid=testClusterID;nsid=1810252091;c=1733743384252), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:04,876 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d2d0bcc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/java.io.tmpdir/jetty-localhost-43453-hadoop-hdfs-3_4_1-tests_jar-_-any-8259178900539163819/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:04,877 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@57c7b86a{HTTP/1.1, (http/1.1)}{localhost:43453} 2024-12-09T11:23:04,877 INFO [Time-limited test {}] server.Server(415): Started @106150ms 2024-12-09T11:23:04,879 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
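Note: the records up to this point show HBaseTestingUtil(751) redirecting the Hadoop, DFS and YARN directory properties into the per-test data directory and then bringing up an in-process HDFS (a NameNode and DataNodes, each behind its own Jetty instance, followed by the first block reports). A minimal sketch of the kind of JUnit-style bootstrap that produces output like this, assuming the HBase 3.x HBaseTestingUtil API; the class name and test body are invented for illustration:

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterBootstrapSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // startMiniCluster() redirects hadoop.log.dir, dfs.*, yarn.* and similar properties
    // into the test data directory, then starts mini DFS, mini ZooKeeper and an
    // HMaster/RegionServer pair -- the sequence visible in the surrounding log records.
    util.startMiniCluster();
    try {
      // ... test body would go here ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}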
2024-12-09T11:23:04,983 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/cluster_56c7653f-d6f8-1f7c-6220-64a27aa2382b/data/data3/current/BP-791067690-172.17.0.3-1733743384252/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:04,983 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/cluster_56c7653f-d6f8-1f7c-6220-64a27aa2382b/data/data4/current/BP-791067690-172.17.0.3-1733743384252/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:05,001 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T11:23:05,004 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb653fb8c61d060d2 with lease ID 0xce13bfaf6b2c89fb: Processing first storage report for DS-14d62653-b463-4ee2-9608-b45d58319651 from datanode DatanodeRegistration(127.0.0.1:36379, datanodeUuid=c5a7def3-9666-447e-acc7-b9cce9d6e6b1, infoPort=43595, infoSecurePort=0, ipcPort=42225, storageInfo=lv=-57;cid=testClusterID;nsid=1810252091;c=1733743384252) 2024-12-09T11:23:05,004 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb653fb8c61d060d2 with lease ID 0xce13bfaf6b2c89fb: from storage DS-14d62653-b463-4ee2-9608-b45d58319651 node DatanodeRegistration(127.0.0.1:36379, datanodeUuid=c5a7def3-9666-447e-acc7-b9cce9d6e6b1, infoPort=43595, infoSecurePort=0, ipcPort=42225, storageInfo=lv=-57;cid=testClusterID;nsid=1810252091;c=1733743384252), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:05,004 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb653fb8c61d060d2 with lease ID 0xce13bfaf6b2c89fb: Processing first storage report for DS-dbef2dca-b26d-4406-ae32-af85cc3f1faf from datanode DatanodeRegistration(127.0.0.1:36379, datanodeUuid=c5a7def3-9666-447e-acc7-b9cce9d6e6b1, infoPort=43595, infoSecurePort=0, ipcPort=42225, storageInfo=lv=-57;cid=testClusterID;nsid=1810252091;c=1733743384252) 2024-12-09T11:23:05,004 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb653fb8c61d060d2 with lease ID 0xce13bfaf6b2c89fb: from storage DS-dbef2dca-b26d-4406-ae32-af85cc3f1faf node DatanodeRegistration(127.0.0.1:36379, datanodeUuid=c5a7def3-9666-447e-acc7-b9cce9d6e6b1, infoPort=43595, infoSecurePort=0, ipcPort=42225, storageInfo=lv=-57;cid=testClusterID;nsid=1810252091;c=1733743384252), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:05,011 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb 2024-12-09T11:23:05,013 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/cluster_56c7653f-d6f8-1f7c-6220-64a27aa2382b/zookeeper_0, clientPort=59080, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/cluster_56c7653f-d6f8-1f7c-6220-64a27aa2382b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/cluster_56c7653f-d6f8-1f7c-6220-64a27aa2382b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T11:23:05,014 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59080 2024-12-09T11:23:05,015 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:05,016 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:05,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:23:05,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:23:05,030 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6 with version=8 2024-12-09T11:23:05,030 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/hbase-staging 2024-12-09T11:23:05,033 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:23:05,033 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:05,033 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:05,033 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:23:05,033 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:05,033 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:23:05,033 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T11:23:05,033 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:23:05,034 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36753 2024-12-09T11:23:05,036 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36753 connecting to ZooKeeper ensemble=127.0.0.1:59080 2024-12-09T11:23:05,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:367530x0, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:23:05,043 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36753-0x1012aec29d50000 connected 2024-12-09T11:23:05,062 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:05,064 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:05,067 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:05,067 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6, hbase.cluster.distributed=false 2024-12-09T11:23:05,069 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:23:05,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36753 2024-12-09T11:23:05,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36753 2024-12-09T11:23:05,070 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36753 2024-12-09T11:23:05,070 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36753 2024-12-09T11:23:05,070 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36753 2024-12-09T11:23:05,092 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:23:05,092 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:05,092 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:05,092 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:23:05,093 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:05,093 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:23:05,093 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T11:23:05,093 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:23:05,094 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39035 2024-12-09T11:23:05,095 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39035 connecting to ZooKeeper ensemble=127.0.0.1:59080 2024-12-09T11:23:05,095 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:05,098 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:05,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390350x0, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:23:05,103 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39035-0x1012aec29d50001 connected 2024-12-09T11:23:05,103 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:05,104 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T11:23:05,107 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T11:23:05,107 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T11:23:05,109 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:23:05,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39035 2024-12-09T11:23:05,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39035 2024-12-09T11:23:05,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39035 2024-12-09T11:23:05,118 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39035 2024-12-09T11:23:05,118 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39035 
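Note: the RpcExecutor records above (default.FPBQ.Fifo, priority.RWQ.Fifo, replication.FPBQ.Fifo, metaPriority.FPBQ.Fifo) document the call-queue sizing for the master RPC server on port 36753 and the region server on port 39035: three default handlers and a maximum queue length of 30 per queue. A hedged configuration sketch with the standard keys that drive this sizing; whether this particular test sets them explicitly, rather than inheriting them from the mini-cluster defaults, is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcHandlerConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Handler count corresponds to the "handlerCount=3" reported for default.FPBQ.Fifo above.
    conf.setInt("hbase.regionserver.handler.count", 3);
    // Upper bound on queued calls; the log reports maxQueueLength=30 per queue.
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
    return conf;
  }
}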
2024-12-09T11:23:05,137 DEBUG [M:0;2dff3a36d44f:36753 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2dff3a36d44f:36753 2024-12-09T11:23:05,138 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2dff3a36d44f,36753,1733743385032 2024-12-09T11:23:05,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:23:05,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:23:05,141 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2dff3a36d44f,36753,1733743385032 2024-12-09T11:23:05,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T11:23:05,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:05,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:05,143 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T11:23:05,147 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2dff3a36d44f,36753,1733743385032 from backup master directory 2024-12-09T11:23:05,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2dff3a36d44f,36753,1733743385032 2024-12-09T11:23:05,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:23:05,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:23:05,149 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T11:23:05,149 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2dff3a36d44f,36753,1733743385032 2024-12-09T11:23:05,156 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/hbase.id] with ID: f1bb520e-cdd4-4fb2-b64a-1de702273765 2024-12-09T11:23:05,156 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/.tmp/hbase.id 2024-12-09T11:23:05,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:23:05,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:23:05,165 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/.tmp/hbase.id]:[hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/hbase.id] 2024-12-09T11:23:05,180 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:05,181 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T11:23:05,182 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-09T11:23:05,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:05,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:05,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:23:05,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:23:05,193 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:23:05,194 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T11:23:05,194 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:23:05,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:23:05,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:23:05,207 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store 2024-12-09T11:23:05,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:23:05,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:23:05,216 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:05,216 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:23:05,216 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:05,216 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:05,216 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:23:05,216 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:05,216 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
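Note: the "Creating ... master:store" records above enumerate the column families of the master's local store region (info, proc, rs and state) and their attributes. For reference, this is how the logged attributes of the 'info' family (VERSIONS => '3', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', BLOCKSIZE => 8 KB) map onto the public descriptor-builder API; the master:store region itself is created internally by HBase, so this sketch is purely illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static TableDescriptor infoFamilyExample() {
    // Mirrors the attributes logged for the 'info' family of master:store.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlocksize(8 * 1024)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
  }
}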
2024-12-09T11:23:05,217 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743385216Disabling compacts and flushes for region at 1733743385216Disabling writes for close at 1733743385216Writing region close event to WAL at 1733743385216Closed at 1733743385216 2024-12-09T11:23:05,218 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/.initializing 2024-12-09T11:23:05,218 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/WALs/2dff3a36d44f,36753,1733743385032 2024-12-09T11:23:05,221 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C36753%2C1733743385032, suffix=, logDir=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/WALs/2dff3a36d44f,36753,1733743385032, archiveDir=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/oldWALs, maxLogs=10 2024-12-09T11:23:05,222 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C36753%2C1733743385032.1733743385222 2024-12-09T11:23:05,233 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/WALs/2dff3a36d44f,36753,1733743385032/2dff3a36d44f%2C36753%2C1733743385032.1733743385222 2024-12-09T11:23:05,238 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43595:43595),(127.0.0.1/127.0.0.1:44937:44937)] 2024-12-09T11:23:05,239 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:23:05,240 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:05,240 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:05,240 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:05,242 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:05,244 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T11:23:05,244 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:05,245 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:05,245 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:05,246 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T11:23:05,247 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:05,247 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:23:05,247 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:05,250 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T11:23:05,250 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:05,250 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:23:05,251 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:05,252 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T11:23:05,252 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:05,253 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:23:05,253 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:05,254 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:05,254 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:05,256 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:05,256 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:05,257 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T11:23:05,258 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:05,261 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:23:05,262 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774258, jitterRate=-0.015480488538742065}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T11:23:05,263 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733743385240Initializing all the Stores at 1733743385241 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743385241Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743385242 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743385242Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743385242Cleaning up temporary data from old regions at 1733743385256 (+14 ms)Region opened successfully at 1733743385263 (+7 ms) 2024-12-09T11:23:05,263 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T11:23:05,268 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54c5a176, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:23:05,269 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T11:23:05,270 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T11:23:05,270 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T11:23:05,270 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T11:23:05,271 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T11:23:05,271 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T11:23:05,272 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T11:23:05,280 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T11:23:05,281 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T11:23:05,283 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T11:23:05,283 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T11:23:05,284 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T11:23:05,286 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T11:23:05,286 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T11:23:05,287 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T11:23:05,289 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T11:23:05,290 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T11:23:05,292 DEBUG 
[master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T11:23:05,294 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T11:23:05,295 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T11:23:05,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:05,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:05,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:05,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:05,299 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2dff3a36d44f,36753,1733743385032, sessionid=0x1012aec29d50000, setting cluster-up flag (Was=false) 2024-12-09T11:23:05,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:05,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:05,311 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T11:23:05,312 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,36753,1733743385032 2024-12-09T11:23:05,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:05,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:05,322 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T11:23:05,325 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,36753,1733743385032 2024-12-09T11:23:05,328 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T11:23:05,330 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T11:23:05,331 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T11:23:05,331 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T11:23:05,331 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2dff3a36d44f,36753,1733743385032 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T11:23:05,333 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:23:05,333 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:23:05,333 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:23:05,333 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:23:05,333 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2dff3a36d44f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T11:23:05,333 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:05,333 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:23:05,333 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T11:23:05,335 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733743415335 2024-12-09T11:23:05,335 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T11:23:05,335 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T11:23:05,335 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T11:23:05,335 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T11:23:05,336 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T11:23:05,336 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T11:23:05,336 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:05,336 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:23:05,336 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T11:23:05,338 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:05,338 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T11:23:05,339 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T11:23:05,339 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T11:23:05,339 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T11:23:05,342 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T11:23:05,342 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T11:23:05,343 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743385342,5,FailOnTimeoutGroup] 2024-12-09T11:23:05,343 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743385343,5,FailOnTimeoutGroup] 2024-12-09T11:23:05,343 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:05,343 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T11:23:05,343 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:05,343 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T11:23:05,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:23:05,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:23:05,421 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(746): ClusterId : f1bb520e-cdd4-4fb2-b64a-1de702273765 2024-12-09T11:23:05,421 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T11:23:05,424 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T11:23:05,424 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T11:23:05,427 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T11:23:05,427 DEBUG [RS:0;2dff3a36d44f:39035 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54fde7d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:23:05,445 DEBUG [RS:0;2dff3a36d44f:39035 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2dff3a36d44f:39035 2024-12-09T11:23:05,445 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T11:23:05,445 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T11:23:05,445 DEBUG [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T11:23:05,446 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,36753,1733743385032 with port=39035, startcode=1733743385092 2024-12-09T11:23:05,446 DEBUG [RS:0;2dff3a36d44f:39035 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T11:23:05,453 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33969, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T11:23:05,453 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36753 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2dff3a36d44f,39035,1733743385092 2024-12-09T11:23:05,453 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36753 {}] master.ServerManager(517): Registering regionserver=2dff3a36d44f,39035,1733743385092 2024-12-09T11:23:05,456 DEBUG [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6 2024-12-09T11:23:05,456 DEBUG [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39687 2024-12-09T11:23:05,456 DEBUG [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T11:23:05,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:23:05,460 DEBUG [RS:0;2dff3a36d44f:39035 {}] zookeeper.ZKUtil(111): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2dff3a36d44f,39035,1733743385092 2024-12-09T11:23:05,460 WARN [RS:0;2dff3a36d44f:39035 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T11:23:05,460 INFO [RS:0;2dff3a36d44f:39035 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:23:05,460 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2dff3a36d44f,39035,1733743385092] 2024-12-09T11:23:05,460 DEBUG [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/WALs/2dff3a36d44f,39035,1733743385092 2024-12-09T11:23:05,464 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T11:23:05,467 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T11:23:05,468 INFO [RS:0;2dff3a36d44f:39035 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:23:05,468 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T11:23:05,469 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T11:23:05,471 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T11:23:05,471 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:05,471 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:05,471 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:05,471 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:05,471 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:05,471 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:05,472 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:23:05,472 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:05,472 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:05,472 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:05,472 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:05,472 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:05,472 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:05,472 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:23:05,472 DEBUG [RS:0;2dff3a36d44f:39035 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:23:05,473 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-09T11:23:05,473 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:05,473 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:05,473 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:05,473 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:05,473 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,39035,1733743385092-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:23:05,494 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T11:23:05,495 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,39035,1733743385092-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:05,495 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:05,495 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.Replication(171): 2dff3a36d44f,39035,1733743385092 started 2024-12-09T11:23:05,515 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:05,515 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(1482): Serving as 2dff3a36d44f,39035,1733743385092, RpcServer on 2dff3a36d44f/172.17.0.3:39035, sessionid=0x1012aec29d50001 2024-12-09T11:23:05,516 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T11:23:05,516 DEBUG [RS:0;2dff3a36d44f:39035 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2dff3a36d44f,39035,1733743385092 2024-12-09T11:23:05,516 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,39035,1733743385092' 2024-12-09T11:23:05,516 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T11:23:05,516 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T11:23:05,517 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T11:23:05,517 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T11:23:05,517 DEBUG [RS:0;2dff3a36d44f:39035 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2dff3a36d44f,39035,1733743385092 2024-12-09T11:23:05,517 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,39035,1733743385092' 2024-12-09T11:23:05,517 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T11:23:05,517 DEBUG 
[RS:0;2dff3a36d44f:39035 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T11:23:05,518 DEBUG [RS:0;2dff3a36d44f:39035 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T11:23:05,518 INFO [RS:0;2dff3a36d44f:39035 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T11:23:05,518 INFO [RS:0;2dff3a36d44f:39035 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T11:23:05,621 INFO [RS:0;2dff3a36d44f:39035 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C39035%2C1733743385092, suffix=, logDir=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/WALs/2dff3a36d44f,39035,1733743385092, archiveDir=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/oldWALs, maxLogs=32 2024-12-09T11:23:05,622 INFO [RS:0;2dff3a36d44f:39035 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C39035%2C1733743385092.1733743385622 2024-12-09T11:23:05,632 INFO [RS:0;2dff3a36d44f:39035 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/WALs/2dff3a36d44f,39035,1733743385092/2dff3a36d44f%2C39035%2C1733743385092.1733743385622 2024-12-09T11:23:05,635 DEBUG [RS:0;2dff3a36d44f:39035 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44937:44937),(127.0.0.1/127.0.0.1:43595:43595)] 2024-12-09T11:23:05,752 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T11:23:05,752 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6 2024-12-09T11:23:05,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741833_1009 (size=32) 2024-12-09T11:23:05,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741833_1009 (size=32) 2024-12-09T11:23:05,762 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:05,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:23:05,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:23:05,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:05,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:05,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:23:05,768 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:23:05,768 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:05,769 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:05,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:23:05,770 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:23:05,770 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:05,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:05,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:23:05,772 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:23:05,772 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:05,773 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:05,773 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:23:05,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/data/hbase/meta/1588230740 2024-12-09T11:23:05,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/data/hbase/meta/1588230740 2024-12-09T11:23:05,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:23:05,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:23:05,776 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T11:23:05,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:23:05,780 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:23:05,780 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725243, jitterRate=-0.07780608534812927}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:23:05,781 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733743385763Initializing all the Stores at 1733743385764 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743385764Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743385764Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743385764Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743385764Cleaning up temporary data from old regions at 1733743385776 (+12 ms)Region opened successfully at 1733743385781 (+5 ms) 2024-12-09T11:23:05,781 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:23:05,781 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-12-09T11:23:05,781 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:23:05,781 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:23:05,781 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:23:05,782 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:23:05,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743385781Disabling compacts and flushes for region at 1733743385781Disabling writes for close at 1733743385781Writing region close event to WAL at 1733743385782 (+1 ms)Closed at 1733743385782 2024-12-09T11:23:05,784 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:23:05,784 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T11:23:05,784 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T11:23:05,786 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:23:05,787 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T11:23:05,938 DEBUG [2dff3a36d44f:36753 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T11:23:05,939 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2dff3a36d44f,39035,1733743385092 2024-12-09T11:23:05,941 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,39035,1733743385092, state=OPENING 2024-12-09T11:23:05,943 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T11:23:05,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:05,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:05,945 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:23:05,945 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:23:05,945 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:23:05,945 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,39035,1733743385092}] 2024-12-09T11:23:06,099 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:23:06,102 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49121, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:23:06,107 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T11:23:06,107 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:23:06,110 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C39035%2C1733743385092.meta, suffix=.meta, logDir=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/WALs/2dff3a36d44f,39035,1733743385092, archiveDir=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/oldWALs, maxLogs=32 2024-12-09T11:23:06,111 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C39035%2C1733743385092.meta.1733743386111.meta 2024-12-09T11:23:06,117 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/WALs/2dff3a36d44f,39035,1733743385092/2dff3a36d44f%2C39035%2C1733743385092.meta.1733743386111.meta 2024-12-09T11:23:06,118 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43595:43595),(127.0.0.1/127.0.0.1:44937:44937)] 2024-12-09T11:23:06,119 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:23:06,119 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T11:23:06,120 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T11:23:06,120 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T11:23:06,120 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T11:23:06,120 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:06,120 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T11:23:06,120 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T11:23:06,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:23:06,122 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:23:06,123 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:06,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:06,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:23:06,124 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:23:06,124 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:06,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:06,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:23:06,125 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:23:06,126 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:06,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:06,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:23:06,127 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:23:06,127 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:06,127 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T11:23:06,128 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:23:06,129 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/data/hbase/meta/1588230740 2024-12-09T11:23:06,130 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/data/hbase/meta/1588230740 2024-12-09T11:23:06,132 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:23:06,132 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:23:06,132 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T11:23:06,134 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:23:06,135 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866669, jitterRate=0.10202744603157043}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:23:06,135 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T11:23:06,136 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733743386120Writing region info on filesystem at 1733743386120Initializing all the Stores at 1733743386121 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743386121Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743386121Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743386121Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743386121Cleaning up temporary data from old regions at 1733743386132 (+11 ms)Running coprocessor post-open hooks at 1733743386135 (+3 ms)Region opened successfully at 1733743386136 (+1 ms) 2024-12-09T11:23:06,137 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733743386099 2024-12-09T11:23:06,141 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T11:23:06,141 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T11:23:06,142 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2dff3a36d44f,39035,1733743385092 2024-12-09T11:23:06,144 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,39035,1733743385092, state=OPEN 2024-12-09T11:23:06,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:23:06,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:23:06,150 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:23:06,150 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2dff3a36d44f,39035,1733743385092 2024-12-09T11:23:06,150 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:23:06,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T11:23:06,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,39035,1733743385092 in 205 msec 2024-12-09T11:23:06,157 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T11:23:06,157 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 370 msec 2024-12-09T11:23:06,158 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:23:06,158 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T11:23:06,160 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:23:06,160 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39035,1733743385092, seqNum=-1] 2024-12-09T11:23:06,161 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:23:06,162 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45329, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:23:06,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 840 msec 2024-12-09T11:23:06,170 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733743386170, completionTime=-1 2024-12-09T11:23:06,171 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T11:23:06,171 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T11:23:06,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-09T11:23:06,173 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T11:23:06,173 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733743446173 2024-12-09T11:23:06,173 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733743506173 2024-12-09T11:23:06,173 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T11:23:06,174 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,36753,1733743385032-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:06,174 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,36753,1733743385032-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:06,174 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,36753,1733743385032-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T11:23:06,174 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2dff3a36d44f:36753, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:06,174 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:06,174 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:06,176 DEBUG [master/2dff3a36d44f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T11:23:06,179 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.030sec 2024-12-09T11:23:06,179 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T11:23:06,179 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T11:23:06,179 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T11:23:06,179 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T11:23:06,179 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T11:23:06,179 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,36753,1733743385032-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:23:06,179 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,36753,1733743385032-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T11:23:06,182 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T11:23:06,182 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T11:23:06,182 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,36753,1733743385032-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T11:23:06,221 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c243edc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:23:06,221 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2dff3a36d44f,36753,-1 for getting cluster id 2024-12-09T11:23:06,221 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:23:06,223 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f1bb520e-cdd4-4fb2-b64a-1de702273765' 2024-12-09T11:23:06,224 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:23:06,224 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f1bb520e-cdd4-4fb2-b64a-1de702273765" 2024-12-09T11:23:06,224 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13680bba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:23:06,224 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2dff3a36d44f,36753,-1] 2024-12-09T11:23:06,225 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:23:06,225 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:06,227 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48506, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:23:06,228 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24c39b4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:23:06,229 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:23:06,230 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,39035,1733743385092, seqNum=-1] 2024-12-09T11:23:06,231 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:23:06,232 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50436, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:23:06,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2dff3a36d44f,36753,1733743385032 2024-12-09T11:23:06,235 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:06,239 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T11:23:06,239 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T11:23:06,240 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T11:23:06,240 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:23:06,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:06,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:06,240 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:23:06,240 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T11:23:06,241 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=602871402, stopped=false 2024-12-09T11:23:06,241 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2dff3a36d44f,36753,1733743385032 2024-12-09T11:23:06,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:06,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:06,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:06,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:06,244 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:23:06,244 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:06,244 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T11:23:06,244 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:23:06,244 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:06,244 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:06,245 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2dff3a36d44f,39035,1733743385092' ***** 2024-12-09T11:23:06,245 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:23:06,245 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:23:06,246 INFO [RS:0;2dff3a36d44f:39035 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:23:06,246 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:23:06,246 INFO [RS:0;2dff3a36d44f:39035 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:23:06,246 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(959): stopping server 2dff3a36d44f,39035,1733743385092 2024-12-09T11:23:06,246 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:23:06,246 INFO [RS:0;2dff3a36d44f:39035 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2dff3a36d44f:39035. 2024-12-09T11:23:06,246 DEBUG [RS:0;2dff3a36d44f:39035 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:23:06,246 DEBUG [RS:0;2dff3a36d44f:39035 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:06,246 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:23:06,246 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:23:06,246 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T11:23:06,246 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T11:23:06,247 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T11:23:06,247 DEBUG [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T11:23:06,247 DEBUG [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T11:23:06,247 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:23:06,247 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:23:06,247 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:23:06,247 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:23:06,247 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:23:06,247 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-09T11:23:06,266 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/data/hbase/meta/1588230740/.tmp/ns/19de7a384d4044b08cd761d631f23a1b is 43, key is default/ns:d/1733743386163/Put/seqid=0 2024-12-09T11:23:06,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741835_1011 (size=5153) 2024-12-09T11:23:06,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741835_1011 (size=5153) 2024-12-09T11:23:06,273 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/data/hbase/meta/1588230740/.tmp/ns/19de7a384d4044b08cd761d631f23a1b 2024-12-09T11:23:06,282 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/data/hbase/meta/1588230740/.tmp/ns/19de7a384d4044b08cd761d631f23a1b as hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/data/hbase/meta/1588230740/ns/19de7a384d4044b08cd761d631f23a1b 2024-12-09T11:23:06,289 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/data/hbase/meta/1588230740/ns/19de7a384d4044b08cd761d631f23a1b, entries=2, sequenceid=6, filesize=5.0 K 2024-12-09T11:23:06,291 INFO 
[RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false 2024-12-09T11:23:06,291 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T11:23:06,297 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:23:06,298 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:23:06,298 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:23:06,298 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743386247Running coprocessor pre-close hooks at 1733743386247Disabling compacts and flushes for region at 1733743386247Disabling writes for close at 1733743386247Obtaining lock to block concurrent updates at 1733743386247Preparing flush snapshotting stores in 1588230740 at 1733743386247Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733743386248 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733743386249 (+1 ms)Flushing 1588230740/ns: creating writer at 1733743386249Flushing 1588230740/ns: appending metadata at 1733743386266 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733743386266Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e310e6: reopening flushed file at 1733743386281 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false at 1733743386291 (+10 ms)Writing region close event to WAL at 1733743386293 (+2 ms)Running coprocessor post-close hooks at 1733743386298 (+5 ms)Closed at 1733743386298 2024-12-09T11:23:06,299 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T11:23:06,447 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(976): stopping server 2dff3a36d44f,39035,1733743385092; all regions closed. 
2024-12-09T11:23:06,448 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,448 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,448 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,448 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,449 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741834_1010 (size=1152) 2024-12-09T11:23:06,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741834_1010 (size=1152) 2024-12-09T11:23:06,455 DEBUG [RS:0;2dff3a36d44f:39035 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/oldWALs 2024-12-09T11:23:06,455 INFO [RS:0;2dff3a36d44f:39035 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C39035%2C1733743385092.meta:.meta(num 1733743386111) 2024-12-09T11:23:06,456 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,456 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,456 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,456 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,456 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741832_1008 (size=93) 2024-12-09T11:23:06,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741832_1008 (size=93) 2024-12-09T11:23:06,461 DEBUG [RS:0;2dff3a36d44f:39035 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/oldWALs 2024-12-09T11:23:06,461 INFO [RS:0;2dff3a36d44f:39035 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C39035%2C1733743385092:(num 1733743385622) 2024-12-09T11:23:06,461 DEBUG [RS:0;2dff3a36d44f:39035 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:06,461 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:23:06,461 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:23:06,461 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.ChoreService(370): Chore service for: regionserver/2dff3a36d44f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T11:23:06,462 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:23:06,462 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T11:23:06,462 INFO [RS:0;2dff3a36d44f:39035 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39035 2024-12-09T11:23:06,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2dff3a36d44f,39035,1733743385092 2024-12-09T11:23:06,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:23:06,464 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:23:06,466 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2dff3a36d44f,39035,1733743385092] 2024-12-09T11:23:06,469 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2dff3a36d44f,39035,1733743385092 already deleted, retry=false 2024-12-09T11:23:06,469 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2dff3a36d44f,39035,1733743385092 expired; onlineServers=0 2024-12-09T11:23:06,469 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2dff3a36d44f,36753,1733743385032' ***** 2024-12-09T11:23:06,469 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T11:23:06,469 INFO [M:0;2dff3a36d44f:36753 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:23:06,469 INFO [M:0;2dff3a36d44f:36753 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:23:06,469 DEBUG [M:0;2dff3a36d44f:36753 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T11:23:06,469 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T11:23:06,469 DEBUG [M:0;2dff3a36d44f:36753 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T11:23:06,469 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743385343 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743385343,5,FailOnTimeoutGroup] 2024-12-09T11:23:06,469 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743385342 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743385342,5,FailOnTimeoutGroup] 2024-12-09T11:23:06,469 INFO [M:0;2dff3a36d44f:36753 {}] hbase.ChoreService(370): Chore service for: master/2dff3a36d44f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T11:23:06,469 INFO [M:0;2dff3a36d44f:36753 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:23:06,469 DEBUG [M:0;2dff3a36d44f:36753 {}] master.HMaster(1795): Stopping service threads 2024-12-09T11:23:06,470 INFO [M:0;2dff3a36d44f:36753 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T11:23:06,470 INFO [M:0;2dff3a36d44f:36753 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:23:06,470 INFO [M:0;2dff3a36d44f:36753 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T11:23:06,470 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T11:23:06,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T11:23:06,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:06,471 DEBUG [M:0;2dff3a36d44f:36753 {}] zookeeper.ZKUtil(347): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T11:23:06,471 WARN [M:0;2dff3a36d44f:36753 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T11:23:06,472 INFO [M:0;2dff3a36d44f:36753 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/.lastflushedseqids 2024-12-09T11:23:06,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741836_1012 (size=108) 2024-12-09T11:23:06,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741836_1012 (size=108) 2024-12-09T11:23:06,479 INFO [M:0;2dff3a36d44f:36753 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T11:23:06,479 INFO [M:0;2dff3a36d44f:36753 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T11:23:06,479 DEBUG [M:0;2dff3a36d44f:36753 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:23:06,479 INFO [M:0;2dff3a36d44f:36753 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:06,479 DEBUG [M:0;2dff3a36d44f:36753 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:06,479 DEBUG [M:0;2dff3a36d44f:36753 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:23:06,479 DEBUG [M:0;2dff3a36d44f:36753 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:06,479 INFO [M:0;2dff3a36d44f:36753 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-09T11:23:06,497 DEBUG [M:0;2dff3a36d44f:36753 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5dffd2f6ecb2401b9b3531dcda79d7d3 is 82, key is hbase:meta,,1/info:regioninfo/1733743386142/Put/seqid=0 2024-12-09T11:23:06,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741837_1013 (size=5672) 2024-12-09T11:23:06,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741837_1013 (size=5672) 2024-12-09T11:23:06,502 INFO [M:0;2dff3a36d44f:36753 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5dffd2f6ecb2401b9b3531dcda79d7d3 2024-12-09T11:23:06,531 DEBUG [M:0;2dff3a36d44f:36753 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/54f5573e65634515a37bfb3ad6666f05 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733743386169/Put/seqid=0 2024-12-09T11:23:06,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741838_1014 (size=5275) 2024-12-09T11:23:06,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741838_1014 (size=5275) 2024-12-09T11:23:06,562 INFO [M:0;2dff3a36d44f:36753 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/54f5573e65634515a37bfb3ad6666f05 2024-12-09T11:23:06,566 INFO [RS:0;2dff3a36d44f:39035 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:23:06,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:06,566 INFO [RS:0;2dff3a36d44f:39035 {}] regionserver.HRegionServer(1031): Exiting; 
stopping=2dff3a36d44f,39035,1733743385092; zookeeper connection closed. 2024-12-09T11:23:06,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39035-0x1012aec29d50001, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:06,571 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1830e9c0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1830e9c0 2024-12-09T11:23:06,571 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T11:23:06,606 DEBUG [M:0;2dff3a36d44f:36753 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c27ed017d4f84e7b913589a21e26f347 is 69, key is 2dff3a36d44f,39035,1733743385092/rs:state/1733743385454/Put/seqid=0 2024-12-09T11:23:06,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741839_1015 (size=5156) 2024-12-09T11:23:06,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741839_1015 (size=5156) 2024-12-09T11:23:06,663 INFO [M:0;2dff3a36d44f:36753 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c27ed017d4f84e7b913589a21e26f347 2024-12-09T11:23:06,710 DEBUG [M:0;2dff3a36d44f:36753 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1059082c6fc34317aaa85d0e4754d829 is 52, key is load_balancer_on/state:d/1733743386238/Put/seqid=0 2024-12-09T11:23:06,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741840_1016 (size=5056) 2024-12-09T11:23:06,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741840_1016 (size=5056) 2024-12-09T11:23:06,732 INFO [M:0;2dff3a36d44f:36753 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1059082c6fc34317aaa85d0e4754d829 2024-12-09T11:23:06,742 DEBUG [M:0;2dff3a36d44f:36753 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5dffd2f6ecb2401b9b3531dcda79d7d3 as hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5dffd2f6ecb2401b9b3531dcda79d7d3 2024-12-09T11:23:06,749 INFO [M:0;2dff3a36d44f:36753 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5dffd2f6ecb2401b9b3531dcda79d7d3, entries=8, sequenceid=29, filesize=5.5 K 2024-12-09T11:23:06,751 DEBUG [M:0;2dff3a36d44f:36753 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/54f5573e65634515a37bfb3ad6666f05 as hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/54f5573e65634515a37bfb3ad6666f05 2024-12-09T11:23:06,759 INFO [M:0;2dff3a36d44f:36753 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/54f5573e65634515a37bfb3ad6666f05, entries=3, sequenceid=29, filesize=5.2 K 2024-12-09T11:23:06,761 DEBUG [M:0;2dff3a36d44f:36753 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c27ed017d4f84e7b913589a21e26f347 as hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c27ed017d4f84e7b913589a21e26f347 2024-12-09T11:23:06,772 INFO [M:0;2dff3a36d44f:36753 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c27ed017d4f84e7b913589a21e26f347, entries=1, sequenceid=29, filesize=5.0 K 2024-12-09T11:23:06,778 DEBUG [M:0;2dff3a36d44f:36753 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1059082c6fc34317aaa85d0e4754d829 as hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1059082c6fc34317aaa85d0e4754d829 2024-12-09T11:23:06,786 INFO [M:0;2dff3a36d44f:36753 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39687/user/jenkins/test-data/80b8f2ba-3528-8622-7b42-1e11345a1fb6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1059082c6fc34317aaa85d0e4754d829, entries=1, sequenceid=29, filesize=4.9 K 2024-12-09T11:23:06,787 INFO [M:0;2dff3a36d44f:36753 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 308ms, sequenceid=29, compaction requested=false 2024-12-09T11:23:06,799 INFO [M:0;2dff3a36d44f:36753 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T11:23:06,799 DEBUG [M:0;2dff3a36d44f:36753 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743386479Disabling compacts and flushes for region at 1733743386479Disabling writes for close at 1733743386479Obtaining lock to block concurrent updates at 1733743386479Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733743386479Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733743386480 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733743386480Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733743386480Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733743386496 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733743386496Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733743386508 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733743386529 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733743386530 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733743386571 (+41 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733743386605 (+34 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733743386605Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733743386670 (+65 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733743386709 (+39 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733743386709Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b513161: reopening flushed file at 1733743386741 (+32 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19c778e7: reopening flushed file at 1733743386750 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6579696e: reopening flushed file at 1733743386759 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b78adea: reopening flushed file at 1733743386773 (+14 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 308ms, sequenceid=29, compaction requested=false at 1733743386787 (+14 ms)Writing region close event to WAL at 1733743386799 (+12 ms)Closed at 1733743386799 2024-12-09T11:23:06,802 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,803 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,803 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,803 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,803 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:06,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45679 is added to blk_1073741830_1006 (size=10311) 2024-12-09T11:23:06,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36379 is added to blk_1073741830_1006 (size=10311) 2024-12-09T11:23:07,208 INFO [M:0;2dff3a36d44f:36753 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T11:23:07,208 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T11:23:07,208 INFO [M:0;2dff3a36d44f:36753 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36753 2024-12-09T11:23:07,208 INFO [M:0;2dff3a36d44f:36753 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:23:07,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:07,311 INFO [M:0;2dff3a36d44f:36753 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:23:07,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x1012aec29d50000, quorum=127.0.0.1:59080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:07,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d2d0bcc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:07,320 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@57c7b86a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:07,320 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:07,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2502c8c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:07,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16c9fe9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:07,323 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:23:07,323 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:23:07,323 WARN [BP-791067690-172.17.0.3-1733743384252 heartbeating to localhost/127.0.0.1:39687 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:23:07,323 WARN [BP-791067690-172.17.0.3-1733743384252 heartbeating to localhost/127.0.0.1:39687 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-791067690-172.17.0.3-1733743384252 (Datanode Uuid c5a7def3-9666-447e-acc7-b9cce9d6e6b1) service to localhost/127.0.0.1:39687 2024-12-09T11:23:07,325 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/cluster_56c7653f-d6f8-1f7c-6220-64a27aa2382b/data/data3/current/BP-791067690-172.17.0.3-1733743384252 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:07,325 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/cluster_56c7653f-d6f8-1f7c-6220-64a27aa2382b/data/data4/current/BP-791067690-172.17.0.3-1733743384252 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:07,325 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:23:07,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a787e38{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:07,335 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@144bd696{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:07,335 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:07,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22ba3420{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:07,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ca07ff3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:07,337 WARN [BP-791067690-172.17.0.3-1733743384252 heartbeating to localhost/127.0.0.1:39687 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:23:07,337 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:23:07,337 WARN [BP-791067690-172.17.0.3-1733743384252 heartbeating to localhost/127.0.0.1:39687 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-791067690-172.17.0.3-1733743384252 (Datanode Uuid 10fcfe74-71d9-4d09-80a2-d0b0605794af) service to localhost/127.0.0.1:39687 2024-12-09T11:23:07,337 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:23:07,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/cluster_56c7653f-d6f8-1f7c-6220-64a27aa2382b/data/data1/current/BP-791067690-172.17.0.3-1733743384252 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:07,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/cluster_56c7653f-d6f8-1f7c-6220-64a27aa2382b/data/data2/current/BP-791067690-172.17.0.3-1733743384252 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:07,339 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:23:07,350 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5316f6d0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:23:07,350 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ced705f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:07,351 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:07,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73737af2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:07,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@246cb388{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:07,358 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T11:23:07,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T11:23:07,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T11:23:07,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/hadoop.log.dir so I do NOT create it in target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd 2024-12-09T11:23:07,382 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1061f397-f685-9d8f-cf61-e957ecad72fb/hadoop.tmp.dir so I do NOT create it in target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd 2024-12-09T11:23:07,382 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5, deleteOnExit=true 2024-12-09T11:23:07,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T11:23:07,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/test.cache.data in system properties and HBase conf 2024-12-09T11:23:07,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T11:23:07,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir in system properties and HBase conf 2024-12-09T11:23:07,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T11:23:07,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T11:23:07,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T11:23:07,383 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T11:23:07,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:23:07,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:23:07,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T11:23:07,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:23:07,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T11:23:07,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T11:23:07,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:23:07,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:23:07,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T11:23:07,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/nfs.dump.dir in system properties and HBase conf 2024-12-09T11:23:07,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/java.io.tmpdir in system properties and HBase conf 2024-12-09T11:23:07,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:23:07,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T11:23:07,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T11:23:07,404 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:23:07,474 INFO [regionserver/2dff3a36d44f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:23:07,504 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:07,510 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:07,524 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:07,524 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:07,524 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:23:07,527 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:07,532 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@576276fa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:07,533 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35be1ff6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:07,695 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c7e90d4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/java.io.tmpdir/jetty-localhost-42907-hadoop-hdfs-3_4_1-tests_jar-_-any-1250094918508303106/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:23:07,696 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e510a6a{HTTP/1.1, (http/1.1)}{localhost:42907} 2024-12-09T11:23:07,696 INFO [Time-limited test {}] server.Server(415): Started @108969ms 2024-12-09T11:23:07,718 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:23:07,922 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:07,928 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:07,931 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:07,931 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:07,931 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:23:07,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@581c9bfb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:07,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5da4e9e3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:07,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:07,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:08,090 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3130a42a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/java.io.tmpdir/jetty-localhost-43915-hadoop-hdfs-3_4_1-tests_jar-_-any-6656804669332347677/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:08,093 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@341c1108{HTTP/1.1, (http/1.1)}{localhost:43915} 2024-12-09T11:23:08,093 INFO [Time-limited test {}] server.Server(415): Started @109366ms 2024-12-09T11:23:08,095 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:23:08,186 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T11:23:08,192 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:08,235 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:08,243 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:08,244 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:08,260 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:08,265 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:08,274 WARN [Thread-653 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data1/current/BP-2059541493-172.17.0.3-1733743387425/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:08,296 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:08,296 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:08,296 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:23:08,303 WARN [Thread-655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data2/current/BP-2059541493-172.17.0.3-1733743387425/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:08,308 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5192cae1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:08,309 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b8a0b06{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:08,372 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:23:08,377 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3abccd3d0eed14c8 with lease ID 0x62224ef78c5454b0: Processing first storage report for DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e from datanode DatanodeRegistration(127.0.0.1:35777, datanodeUuid=afe86b40-b1a5-4ed5-9b65-30addde00ac5, infoPort=43067, infoSecurePort=0, ipcPort=35811, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425) 2024-12-09T11:23:08,377 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3abccd3d0eed14c8 with lease ID 0x62224ef78c5454b0: from storage DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e node DatanodeRegistration(127.0.0.1:35777, datanodeUuid=afe86b40-b1a5-4ed5-9b65-30addde00ac5, infoPort=43067, infoSecurePort=0, ipcPort=35811, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:08,377 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3abccd3d0eed14c8 with lease ID 0x62224ef78c5454b0: Processing first storage report for DS-8919198e-e6e3-4f95-a635-17faac6231a0 from datanode DatanodeRegistration(127.0.0.1:35777, datanodeUuid=afe86b40-b1a5-4ed5-9b65-30addde00ac5, infoPort=43067, infoSecurePort=0, ipcPort=35811, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425) 2024-12-09T11:23:08,377 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3abccd3d0eed14c8 with lease ID 0x62224ef78c5454b0: from storage DS-8919198e-e6e3-4f95-a635-17faac6231a0 node DatanodeRegistration(127.0.0.1:35777, datanodeUuid=afe86b40-b1a5-4ed5-9b65-30addde00ac5, infoPort=43067, infoSecurePort=0, ipcPort=35811, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T11:23:08,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@51f3eae5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/java.io.tmpdir/jetty-localhost-38307-hadoop-hdfs-3_4_1-tests_jar-_-any-14873576487427282092/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:08,453 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fbfcb3a{HTTP/1.1, (http/1.1)}{localhost:38307} 2024-12-09T11:23:08,453 INFO [Time-limited test {}] server.Server(415): Started @109726ms 2024-12-09T11:23:08,456 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
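[Editor's note] The entries above show HBaseTestingUtil tearing down one minicluster and bringing up a fresh one (1 master, 1 region server, 2 datanodes, 1 ZK server) for the next test. A minimal sketch of what such a harness typically looks like with the HBase 3 testing API follows; the option values mirror the StartMiniClusterOption printed in the log, while the class name, the builder method names, and the surrounding structure are assumptions for illustration, not taken from this run.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Same shape as the option logged above: 1 master, 1 region server, 2 datanodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // brings up DFS, ZooKeeper and HBase, as traced in the log
        try {
          // ... exercise the cluster here ...
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" message seen above
        }
      }
    }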
2024-12-09T11:23:08,646 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data3/current/BP-2059541493-172.17.0.3-1733743387425/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:08,659 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data4/current/BP-2059541493-172.17.0.3-1733743387425/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:08,693 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T11:23:08,697 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdcc574aa3304260a with lease ID 0x62224ef78c5454b1: Processing first storage report for DS-185b7880-58c5-4218-ad2f-2db231ca8105 from datanode DatanodeRegistration(127.0.0.1:37587, datanodeUuid=2246728c-2716-42f5-9852-b7b26aeb1f07, infoPort=35347, infoSecurePort=0, ipcPort=33067, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425) 2024-12-09T11:23:08,697 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdcc574aa3304260a with lease ID 0x62224ef78c5454b1: from storage DS-185b7880-58c5-4218-ad2f-2db231ca8105 node DatanodeRegistration(127.0.0.1:37587, datanodeUuid=2246728c-2716-42f5-9852-b7b26aeb1f07, infoPort=35347, infoSecurePort=0, ipcPort=33067, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:08,698 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdcc574aa3304260a with lease ID 0x62224ef78c5454b1: Processing first storage report for DS-c0f2d9c4-ea98-4623-83c1-2b07a57697bb from datanode DatanodeRegistration(127.0.0.1:37587, datanodeUuid=2246728c-2716-42f5-9852-b7b26aeb1f07, infoPort=35347, infoSecurePort=0, ipcPort=33067, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425) 2024-12-09T11:23:08,698 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdcc574aa3304260a with lease ID 0x62224ef78c5454b1: from storage DS-c0f2d9c4-ea98-4623-83c1-2b07a57697bb node DatanodeRegistration(127.0.0.1:37587, datanodeUuid=2246728c-2716-42f5-9852-b7b26aeb1f07, infoPort=35347, infoSecurePort=0, ipcPort=33067, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:08,764 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd 2024-12-09T11:23:08,777 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/zookeeper_0, clientPort=60908, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T11:23:08,779 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60908 2024-12-09T11:23:08,779 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:08,781 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:08,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:23:08,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37587 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:23:08,818 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e with version=8 2024-12-09T11:23:08,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/hbase-staging 2024-12-09T11:23:08,823 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:23:08,823 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:08,823 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:08,823 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:23:08,824 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:08,824 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:23:08,824 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T11:23:08,824 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:23:08,835 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43697 2024-12-09T11:23:08,837 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43697 connecting to ZooKeeper ensemble=127.0.0.1:60908 2024-12-09T11:23:08,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:436970x0, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:23:08,859 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43697-0x1012aec38870000 connected 2024-12-09T11:23:08,893 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:08,895 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:08,898 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:08,898 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e, hbase.cluster.distributed=false 2024-12-09T11:23:08,900 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:23:08,902 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43697 2024-12-09T11:23:08,906 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43697 2024-12-09T11:23:08,910 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43697 2024-12-09T11:23:08,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43697 2024-12-09T11:23:08,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43697 2024-12-09T11:23:08,939 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:23:08,939 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:08,939 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:08,939 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:23:08,939 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:08,939 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:23:08,939 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T11:23:08,939 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:23:08,940 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42933 2024-12-09T11:23:08,942 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42933 connecting to ZooKeeper ensemble=127.0.0.1:60908 2024-12-09T11:23:08,943 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:08,947 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:08,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:429330x0, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:23:08,957 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:429330x0, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:08,958 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T11:23:08,959 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42933-0x1012aec38870001 connected 2024-12-09T11:23:08,966 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T11:23:08,975 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T11:23:08,977 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:23:08,982 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42933 2024-12-09T11:23:08,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42933 2024-12-09T11:23:08,994 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42933 2024-12-09T11:23:09,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42933 2024-12-09T11:23:09,019 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42933 2024-12-09T11:23:09,036 
DEBUG [M:0;2dff3a36d44f:43697 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2dff3a36d44f:43697 2024-12-09T11:23:09,036 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2dff3a36d44f,43697,1733743388823 2024-12-09T11:23:09,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:23:09,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:23:09,039 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2dff3a36d44f,43697,1733743388823 2024-12-09T11:23:09,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T11:23:09,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:09,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:09,043 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T11:23:09,044 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2dff3a36d44f,43697,1733743388823 from backup master directory 2024-12-09T11:23:09,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:23:09,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2dff3a36d44f,43697,1733743388823 2024-12-09T11:23:09,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:23:09,046 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
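[Editor's note] The NodeCreated/NodeChildrenChanged events above come from the master and region server watching znodes under /hbase on the embedded quorum at 127.0.0.1:60908. A hedged illustration of inspecting those znodes with the plain ZooKeeper client is below; the connect string and paths are taken from the log, while the session timeout, class name and variable names are arbitrary.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkInspectSketch {
      public static void main(String[] args) throws Exception {
        // Connect to the MiniZooKeeperCluster client port reported above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60908", 30000, event -> { });
        try {
          // Children of the base znode; expect entries such as master, backup-masters, running, acl.
          List<String> children = zk.getChildren("/hbase", false);
          System.out.println("/hbase children: " + children);
          // The active master's registration lives at /hbase/master (protobuf-encoded server name).
          byte[] master = zk.getData("/hbase/master", false, null);
          System.out.println("/hbase/master payload length: " + master.length);
        } finally {
          zk.close();
        }
      }
    }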
2024-12-09T11:23:09,047 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2dff3a36d44f,43697,1733743388823 2024-12-09T11:23:09,064 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/hbase.id] with ID: 7666142b-462e-448e-b154-0af2aea9f636 2024-12-09T11:23:09,064 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/.tmp/hbase.id 2024-12-09T11:23:09,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:23:09,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37587 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:23:09,089 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/.tmp/hbase.id]:[hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/hbase.id] 2024-12-09T11:23:09,107 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:09,107 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T11:23:09,109 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
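[Editor's note] The two FSUtils entries above follow a write-to-temporary-then-rename pattern for the cluster ID file: hbase.id is first written under .tmp and then moved into place. A generic sketch of that pattern with the Hadoop FileSystem API is below; the helper name and paths are illustrative assumptions, not the FSUtils implementation itself.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      // Write content to a temporary file, then rename it into place so readers
      // never observe a partially written cluster ID file.
      static void writeAtomically(FileSystem fs, Path target, String content) throws IOException {
        Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(content.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, target)) {
          throw new IOException("Failed to move " + tmp + " to " + target);
        }
      }
    }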
2024-12-09T11:23:09,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:09,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:09,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:23:09,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37587 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:23:09,130 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:23:09,131 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T11:23:09,131 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:23:09,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37587 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:23:09,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:23:09,157 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store 2024-12-09T11:23:09,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37587 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:23:09,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:23:09,171 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:09,171 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:23:09,171 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:09,171 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:09,171 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:23:09,171 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:09,171 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
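[Editor's note] The master:store region created above is an internal MasterRegion, but its four column families (info, proc, rs, state) are described with the same attributes an ordinary table descriptor carries. A rough equivalent built with the public client API is sketched below for reference; it only approximates the attributes printed in the log and is not how MasterRegion actually constructs its descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        // 'info' as logged: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
        ColumnFamilyDescriptorBuilder info =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024);
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
                .setColumnFamily(info.build());
        // 'proc', 'rs' and 'state' as logged: single version, ROW bloom, default 64 KB blocks.
        for (String family : new String[] { "proc", "rs", "state" }) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .build());
        }
        return builder.build();
      }
    }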
2024-12-09T11:23:09,171 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743389171Disabling compacts and flushes for region at 1733743389171Disabling writes for close at 1733743389171Writing region close event to WAL at 1733743389171Closed at 1733743389171 2024-12-09T11:23:09,172 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/.initializing 2024-12-09T11:23:09,173 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823 2024-12-09T11:23:09,176 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C43697%2C1733743388823, suffix=, logDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823, archiveDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/oldWALs, maxLogs=10 2024-12-09T11:23:09,176 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C43697%2C1733743388823.1733743389176 2024-12-09T11:23:09,192 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823/2dff3a36d44f%2C43697%2C1733743388823.1733743389176 2024-12-09T11:23:09,197 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35347:35347),(127.0.0.1/127.0.0.1:43067:43067)] 2024-12-09T11:23:09,210 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:23:09,211 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:09,211 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:09,211 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:09,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:09,215 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T11:23:09,215 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,216 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:09,216 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:09,217 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T11:23:09,217 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,218 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:23:09,218 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:09,219 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T11:23:09,219 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,220 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:23:09,220 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:09,221 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T11:23:09,221 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,222 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:23:09,222 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:09,223 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:09,223 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:09,224 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:09,224 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:09,225 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T11:23:09,226 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:09,229 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:23:09,229 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871831, jitterRate=0.10859069228172302}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T11:23:09,230 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733743389211Initializing all the Stores at 1733743389212 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743389212Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743389213 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743389213Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743389213Cleaning up temporary data from old regions at 1733743389224 (+11 ms)Region opened successfully at 1733743389230 (+6 ms) 2024-12-09T11:23:09,231 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T11:23:09,234 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38bf8810, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:23:09,235 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T11:23:09,235 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T11:23:09,235 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T11:23:09,235 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T11:23:09,236 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T11:23:09,236 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T11:23:09,236 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T11:23:09,239 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T11:23:09,240 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T11:23:09,242 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T11:23:09,242 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T11:23:09,243 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T11:23:09,244 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T11:23:09,244 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T11:23:09,245 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T11:23:09,246 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T11:23:09,247 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T11:23:09,249 DEBUG 
[master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T11:23:09,251 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T11:23:09,252 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T11:23:09,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:09,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:09,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:09,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:09,255 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2dff3a36d44f,43697,1733743388823, sessionid=0x1012aec38870000, setting cluster-up flag (Was=false) 2024-12-09T11:23:09,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:09,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:09,265 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T11:23:09,266 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,43697,1733743388823 2024-12-09T11:23:09,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:09,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:09,277 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T11:23:09,278 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,43697,1733743388823 2024-12-09T11:23:09,280 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T11:23:09,282 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T11:23:09,282 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T11:23:09,282 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T11:23:09,282 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2dff3a36d44f,43697,1733743388823 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T11:23:09,284 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:23:09,284 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:23:09,284 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:23:09,284 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:23:09,284 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2dff3a36d44f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T11:23:09,284 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:09,284 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:23:09,284 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T11:23:09,286 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:23:09,286 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T11:23:09,287 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733743419287 2024-12-09T11:23:09,287 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T11:23:09,287 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T11:23:09,287 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T11:23:09,287 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T11:23:09,287 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,287 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T11:23:09,287 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T11:23:09,287 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
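
The chore entries just above (the cleaner initializers and the LogsCleaner chore with period=600000 ms) come from HBase's ChoreService scheduling periodic background tasks. As a hedged illustration only, and not the code that produced these log lines, a minimal ScheduledChore can be defined and scheduled roughly like this; the chore name, period, and Stoppable stub are invented for the example:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Trivial Stoppable stub; a real server passes itself so chores stop on shutdown.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // Hypothetical chore that just prints a heartbeat every 600000 ms,
    // mirroring the "period=600000, unit=MILLISECONDS is enabled" pattern above.
    ScheduledChore heartbeat = new ScheduledChore("ExampleHeartbeatChore", stopper, 600_000) {
      @Override
      protected void chore() {
        System.out.println("chore tick: " + System.currentTimeMillis());
      }
    };

    ChoreService choreService = new ChoreService("example");
    choreService.scheduleChore(heartbeat); // runs until the stopper or the service shuts down

    Thread.sleep(1_000);
    choreService.shutdown();
  }
}
```
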
2024-12-09T11:23:09,287 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T11:23:09,290 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T11:23:09,291 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T11:23:09,291 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T11:23:09,294 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T11:23:09,294 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T11:23:09,294 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743389294,5,FailOnTimeoutGroup] 2024-12-09T11:23:09,295 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743389294,5,FailOnTimeoutGroup] 2024-12-09T11:23:09,295 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:09,295 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
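
The descriptor dump above lists the column-family attributes (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY, BLOCKSIZE, and so on) that FSTableDescriptors persists for hbase:meta. As a rough, hedged sketch of how the same attributes look when built through the public client API; this illustrates a hypothetical user table, not the internal meta bootstrap code, and it assumes a reachable cluster configured via hbase-site.xml:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class TableDescriptorSketch {
  public static void main(String[] args) throws IOException {
    // 'info' family mirroring the logged attributes: 3 versions, in-memory,
    // 8 KB blocks, ROWCOL bloom filter, ROW_INDEX_V1 block encoding.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();

    // Hypothetical table name; hbase:meta itself is created by the master, not by clients.
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(info)
        .build();

    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(table);
    }
  }
}
```
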
2024-12-09T11:23:09,295 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:09,295 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:09,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37587 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:23:09,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:23:09,303 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T11:23:09,303 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e 2024-12-09T11:23:09,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37587 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:23:09,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:23:09,316 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:09,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:23:09,319 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:23:09,319 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:09,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:23:09,321 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:23:09,321 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,321 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(746): ClusterId : 7666142b-462e-448e-b154-0af2aea9f636 2024-12-09T11:23:09,322 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T11:23:09,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:09,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 
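
Every CompactionConfiguration line in this log prints the same defaults: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0, and a 7-day major compaction period with 0.5 jitter. As a hedged sketch, the configuration keys I believe correspond to those values can be set programmatically as below; the key names and numbers reflect my understanding of the stock defaults, not values read from this test's configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Store file size lower bound for minor compaction selection (minCompactSize in the log).
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    // Files per compaction: minFilesToCompact / maxFilesToCompact in the log.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Selection ratios: 1.2 normally, 5.0 during configured off-peak hours.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // Major compaction: once per 7 days (604800000 ms) with +/-50% jitter, as printed above.
    conf.setLong("hbase.hregion.majorcompaction", 7L * 24 * 60 * 60 * 1000);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);

    System.out.println("compaction ratio = "
        + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
  }
}
```
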
2024-12-09T11:23:09,324 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:23:09,324 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,324 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T11:23:09,324 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T11:23:09,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:09,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:23:09,328 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:23:09,328 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,329 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T11:23:09,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:09,329 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:23:09,329 DEBUG [RS:0;2dff3a36d44f:42933 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ff67fa3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:23:09,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740 2024-12-09T11:23:09,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740 2024-12-09T11:23:09,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:23:09,332 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:23:09,333 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T11:23:09,334 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:23:09,337 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:23:09,338 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=688989, jitterRate=-0.12390570342540741}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:23:09,339 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733743389316Initializing all the Stores at 1733743389317 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743389317Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743389317Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743389317Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743389317Cleaning up temporary data from old regions at 1733743389332 (+15 ms)Region opened 
successfully at 1733743389339 (+7 ms) 2024-12-09T11:23:09,339 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:23:09,339 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:23:09,339 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:23:09,339 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:23:09,339 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:23:09,340 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:23:09,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743389339Disabling compacts and flushes for region at 1733743389339Disabling writes for close at 1733743389339Writing region close event to WAL at 1733743389340 (+1 ms)Closed at 1733743389340 2024-12-09T11:23:09,343 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:23:09,343 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T11:23:09,343 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T11:23:09,345 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:23:09,347 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T11:23:09,348 DEBUG [RS:0;2dff3a36d44f:42933 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2dff3a36d44f:42933 2024-12-09T11:23:09,348 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T11:23:09,348 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T11:23:09,348 DEBUG [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T11:23:09,349 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,43697,1733743388823 with port=42933, startcode=1733743388938 2024-12-09T11:23:09,350 DEBUG [RS:0;2dff3a36d44f:42933 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T11:23:09,353 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43535, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T11:23:09,354 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43697 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:09,354 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43697 {}] master.ServerManager(517): Registering regionserver=2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:09,356 DEBUG [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e 2024-12-09T11:23:09,356 DEBUG [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42671 2024-12-09T11:23:09,356 DEBUG [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T11:23:09,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:23:09,358 DEBUG [RS:0;2dff3a36d44f:42933 {}] zookeeper.ZKUtil(111): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:09,358 WARN [RS:0;2dff3a36d44f:42933 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T11:23:09,358 INFO [RS:0;2dff3a36d44f:42933 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:23:09,358 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2dff3a36d44f,42933,1733743388938] 2024-12-09T11:23:09,358 DEBUG [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:09,362 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T11:23:09,365 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T11:23:09,366 INFO [RS:0;2dff3a36d44f:42933 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:23:09,367 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
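
The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M (95% of the limit), and the pressure-aware throughput controller caps compactions between 50 and 100 MB/s. Below is a hedged sketch of the keys I believe control these limits; the fractions are the stock defaults as I understand them, the heap-size arithmetic in the comment is an assumption, and none of these values were read from this test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Global memstore limit as a fraction of the region server heap;
    // 0.4 of a roughly 2.2 GB heap would land near the 880 M figure in the log.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of the limit above (0.95 * 880 M is about 836 M).
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);

    // Compaction throughput bounds in bytes/second, matching the
    // "higher bound: 100.00 MB/second, lower bound 50.00 MB/second" line (key names assumed).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);

    System.out.println("memstore fraction = "
        + conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f));
  }
}
```
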
2024-12-09T11:23:09,367 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T11:23:09,368 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T11:23:09,368 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:09,368 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:09,368 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:09,369 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:09,369 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:09,369 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:09,369 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:23:09,369 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:09,369 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:09,369 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:09,369 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:09,369 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:09,369 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:09,369 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:23:09,369 DEBUG [RS:0;2dff3a36d44f:42933 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:23:09,378 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-09T11:23:09,378 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:09,378 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:09,378 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:09,378 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:09,378 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42933,1733743388938-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:23:09,394 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T11:23:09,394 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42933,1733743388938-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:09,394 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:09,394 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.Replication(171): 2dff3a36d44f,42933,1733743388938 started 2024-12-09T11:23:09,409 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:09,409 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(1482): Serving as 2dff3a36d44f,42933,1733743388938, RpcServer on 2dff3a36d44f/172.17.0.3:42933, sessionid=0x1012aec38870001 2024-12-09T11:23:09,410 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T11:23:09,410 DEBUG [RS:0;2dff3a36d44f:42933 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:09,410 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,42933,1733743388938' 2024-12-09T11:23:09,410 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T11:23:09,411 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T11:23:09,411 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T11:23:09,411 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T11:23:09,411 DEBUG [RS:0;2dff3a36d44f:42933 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:09,411 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,42933,1733743388938' 2024-12-09T11:23:09,411 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T11:23:09,412 DEBUG 
[RS:0;2dff3a36d44f:42933 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T11:23:09,412 DEBUG [RS:0;2dff3a36d44f:42933 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T11:23:09,412 INFO [RS:0;2dff3a36d44f:42933 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T11:23:09,412 INFO [RS:0;2dff3a36d44f:42933 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T11:23:09,497 WARN [2dff3a36d44f:43697 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T11:23:09,515 INFO [RS:0;2dff3a36d44f:42933 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C42933%2C1733743388938, suffix=, logDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938, archiveDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/oldWALs, maxLogs=32 2024-12-09T11:23:09,516 INFO [RS:0;2dff3a36d44f:42933 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C42933%2C1733743388938.1733743389516 2024-12-09T11:23:09,524 INFO [RS:0;2dff3a36d44f:42933 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 2024-12-09T11:23:09,528 DEBUG [RS:0;2dff3a36d44f:42933 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43067:43067),(127.0.0.1/127.0.0.1:35347:35347)] 2024-12-09T11:23:09,748 DEBUG [2dff3a36d44f:43697 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T11:23:09,749 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:09,751 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,42933,1733743388938, state=OPENING 2024-12-09T11:23:09,753 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T11:23:09,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:09,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:09,756 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:23:09,756 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:23:09,756 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:23:09,756 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,42933,1733743388938}] 2024-12-09T11:23:09,910 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:23:09,917 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34945, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:23:09,922 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T11:23:09,922 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:23:09,924 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C42933%2C1733743388938.meta, suffix=.meta, logDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938, archiveDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/oldWALs, maxLogs=32 2024-12-09T11:23:09,926 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta 2024-12-09T11:23:09,939 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta 2024-12-09T11:23:09,940 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35347:35347),(127.0.0.1/127.0.0.1:43067:43067)] 2024-12-09T11:23:09,944 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:23:09,944 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T11:23:09,945 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T11:23:09,945 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
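
The last entries above show the MultiRowMutationEndpoint coprocessor being loaded from the meta table descriptor, i.e. the coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|' attribute seen earlier in this log. As a hedged illustration of how a coprocessor class is attached to a table descriptor through the client API; the table name is hypothetical, and hbase:meta receives this endpoint from the master bootstrap rather than from user code:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CoprocessorAttachSketch {
  public static void main(String[] args) throws IOException {
    // Attach the endpoint by class name; the region server loads it when the
    // region opens, which is what the RegionCoprocessorHost line above records.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();

    System.out.println(td);
  }
}
```
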
2024-12-09T11:23:09,945 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T11:23:09,945 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:09,945 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T11:23:09,945 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T11:23:09,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:23:09,971 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:23:09,971 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,972 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:09,972 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:23:09,974 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:23:09,974 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,977 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:09,977 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:23:09,978 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:23:09,978 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:09,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:23:09,980 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:23:09,980 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:09,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T11:23:09,981 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:23:09,982 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740 2024-12-09T11:23:09,983 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740 2024-12-09T11:23:09,985 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:23:09,985 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:23:09,986 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T11:23:09,987 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:23:09,988 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760317, jitterRate=-0.0332074910402298}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:23:09,989 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T11:23:09,990 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733743389945Writing region info on filesystem at 1733743389945Initializing all the Stores at 1733743389951 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743389951Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743389951Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743389952 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743389952Cleaning up temporary data from old regions at 1733743389985 (+33 ms)Running coprocessor post-open hooks at 1733743389989 (+4 ms)Region opened successfully at 1733743389989 2024-12-09T11:23:09,991 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733743389910 2024-12-09T11:23:09,994 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T11:23:09,994 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T11:23:09,996 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:09,997 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,42933,1733743388938, state=OPEN 2024-12-09T11:23:10,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:23:10,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:23:10,004 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:23:10,004 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:10,004 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:23:10,008 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T11:23:10,008 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,42933,1733743388938 in 248 msec 2024-12-09T11:23:10,013 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T11:23:10,013 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 665 msec 2024-12-09T11:23:10,014 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:23:10,014 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T11:23:10,016 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:23:10,017 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,42933,1733743388938, seqNum=-1] 2024-12-09T11:23:10,017 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:23:10,019 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47931, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:23:10,031 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 748 msec 2024-12-09T11:23:10,031 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733743390031, completionTime=-1 2024-12-09T11:23:10,031 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T11:23:10,031 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T11:23:10,033 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T11:23:10,033 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733743450033 2024-12-09T11:23:10,033 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733743510033 2024-12-09T11:23:10,033 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T11:23:10,034 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43697,1733743388823-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,034 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43697,1733743388823-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,034 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43697,1733743388823-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,034 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2dff3a36d44f:43697, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T11:23:10,034 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,034 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,036 DEBUG [master/2dff3a36d44f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T11:23:10,038 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.991sec 2024-12-09T11:23:10,038 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T11:23:10,038 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T11:23:10,038 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T11:23:10,038 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T11:23:10,038 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T11:23:10,038 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43697,1733743388823-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:23:10,038 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43697,1733743388823-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T11:23:10,041 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T11:23:10,041 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T11:23:10,041 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43697,1733743388823-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T11:23:10,121 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3008f2c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:23:10,122 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2dff3a36d44f,43697,-1 for getting cluster id 2024-12-09T11:23:10,122 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:23:10,124 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7666142b-462e-448e-b154-0af2aea9f636' 2024-12-09T11:23:10,124 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:23:10,124 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7666142b-462e-448e-b154-0af2aea9f636" 2024-12-09T11:23:10,125 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c1fe91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:23:10,125 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2dff3a36d44f,43697,-1] 2024-12-09T11:23:10,125 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:23:10,125 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:10,127 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38556, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:23:10,127 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d3f9945, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:23:10,128 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:23:10,129 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,42933,1733743388938, seqNum=-1] 2024-12-09T11:23:10,129 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:23:10,131 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41906, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:23:10,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2dff3a36d44f,43697,1733743388823 2024-12-09T11:23:10,133 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:10,166 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T11:23:10,184 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:23:10,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:10,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:10,184 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:23:10,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:10,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:23:10,185 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T11:23:10,185 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:23:10,185 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42489 2024-12-09T11:23:10,187 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42489 connecting to ZooKeeper ensemble=127.0.0.1:60908 2024-12-09T11:23:10,187 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:10,189 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:10,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:424890x0, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:23:10,194 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42489-0x1012aec38870002, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-09T11:23:10,194 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42489-0x1012aec38870002 connected 2024-12-09T11:23:10,194 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-09T11:23:10,195 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T11:23:10,195 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-12-09T11:23:10,196 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42489-0x1012aec38870002, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T11:23:10,197 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42489-0x1012aec38870002, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:23:10,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42489 2024-12-09T11:23:10,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42489 2024-12-09T11:23:10,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42489 2024-12-09T11:23:10,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42489 2024-12-09T11:23:10,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42489 2024-12-09T11:23:10,203 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.HRegionServer(746): ClusterId : 7666142b-462e-448e-b154-0af2aea9f636 2024-12-09T11:23:10,203 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T11:23:10,205 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T11:23:10,205 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T11:23:10,207 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T11:23:10,208 DEBUG [RS:1;2dff3a36d44f:42489 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@653cf202, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:23:10,221 DEBUG [RS:1;2dff3a36d44f:42489 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;2dff3a36d44f:42489 2024-12-09T11:23:10,221 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T11:23:10,221 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T11:23:10,221 DEBUG [RS:1;2dff3a36d44f:42489 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T11:23:10,222 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,43697,1733743388823 with port=42489, startcode=1733743390184 2024-12-09T11:23:10,222 DEBUG [RS:1;2dff3a36d44f:42489 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T11:23:10,224 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41123, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T11:23:10,224 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43697 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2dff3a36d44f,42489,1733743390184 2024-12-09T11:23:10,224 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43697 {}] master.ServerManager(517): Registering regionserver=2dff3a36d44f,42489,1733743390184 2024-12-09T11:23:10,226 DEBUG [RS:1;2dff3a36d44f:42489 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e 2024-12-09T11:23:10,226 DEBUG [RS:1;2dff3a36d44f:42489 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42671 2024-12-09T11:23:10,226 DEBUG [RS:1;2dff3a36d44f:42489 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T11:23:10,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:23:10,228 DEBUG [RS:1;2dff3a36d44f:42489 {}] zookeeper.ZKUtil(111): regionserver:42489-0x1012aec38870002, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2dff3a36d44f,42489,1733743390184 2024-12-09T11:23:10,228 WARN [RS:1;2dff3a36d44f:42489 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T11:23:10,229 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2dff3a36d44f,42489,1733743390184] 2024-12-09T11:23:10,229 INFO [RS:1;2dff3a36d44f:42489 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:23:10,229 DEBUG [RS:1;2dff3a36d44f:42489 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184 2024-12-09T11:23:10,232 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T11:23:10,234 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T11:23:10,234 INFO [RS:1;2dff3a36d44f:42489 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:23:10,234 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T11:23:10,234 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T11:23:10,235 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T11:23:10,235 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,235 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:10,235 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:10,235 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:10,235 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:10,235 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:10,235 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:23:10,236 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:10,236 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:10,236 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:10,236 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:10,236 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:10,236 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:10,236 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:23:10,236 DEBUG [RS:1;2dff3a36d44f:42489 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:23:10,238 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-09T11:23:10,238 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,238 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,238 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,238 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,239 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42489,1733743390184-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:23:10,254 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T11:23:10,254 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42489,1733743390184-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,254 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,254 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.Replication(171): 2dff3a36d44f,42489,1733743390184 started 2024-12-09T11:23:10,268 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:10,268 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.HRegionServer(1482): Serving as 2dff3a36d44f,42489,1733743390184, RpcServer on 2dff3a36d44f/172.17.0.3:42489, sessionid=0x1012aec38870002 2024-12-09T11:23:10,268 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T11:23:10,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;2dff3a36d44f:42489,5,FailOnTimeoutGroup] 2024-12-09T11:23:10,268 DEBUG [RS:1;2dff3a36d44f:42489 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2dff3a36d44f,42489,1733743390184 2024-12-09T11:23:10,268 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,42489,1733743390184' 2024-12-09T11:23:10,268 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T11:23:10,269 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-09T11:23:10,269 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T11:23:10,269 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T11:23:10,270 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T11:23:10,270 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T11:23:10,270 DEBUG [RS:1;2dff3a36d44f:42489 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
2dff3a36d44f,42489,1733743390184 2024-12-09T11:23:10,270 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,42489,1733743390184' 2024-12-09T11:23:10,270 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T11:23:10,270 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T11:23:10,270 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 2dff3a36d44f,43697,1733743388823 2024-12-09T11:23:10,270 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4ef7efc4 2024-12-09T11:23:10,271 DEBUG [RS:1;2dff3a36d44f:42489 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T11:23:10,271 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T11:23:10,271 INFO [RS:1;2dff3a36d44f:42489 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T11:23:10,271 INFO [RS:1;2dff3a36d44f:42489 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T11:23:10,272 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38572, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T11:23:10,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43697 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T11:23:10,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43697 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-09T11:23:10,273 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43697 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:23:10,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43697 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T11:23:10,277 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:23:10,277 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:10,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43697 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-09T11:23:10,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43697 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:23:10,279 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:23:10,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37587 is added to blk_1073741835_1011 (size=393) 2024-12-09T11:23:10,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741835_1011 (size=393) 2024-12-09T11:23:10,290 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 693b5ab06e8c428d0553d9f2b9d9d929, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e 2024-12-09T11:23:10,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35777 is added to blk_1073741836_1012 (size=76) 2024-12-09T11:23:10,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37587 is added to blk_1073741836_1012 (size=76) 2024-12-09T11:23:10,297 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:10,297 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 693b5ab06e8c428d0553d9f2b9d9d929, disabling compactions & flushes 2024-12-09T11:23:10,297 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 2024-12-09T11:23:10,297 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 2024-12-09T11:23:10,297 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. after waiting 0 ms 2024-12-09T11:23:10,297 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 2024-12-09T11:23:10,297 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 2024-12-09T11:23:10,297 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 693b5ab06e8c428d0553d9f2b9d9d929: Waiting for close lock at 1733743390297Disabling compacts and flushes for region at 1733743390297Disabling writes for close at 1733743390297Writing region close event to WAL at 1733743390297Closed at 1733743390297 2024-12-09T11:23:10,299 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:23:10,299 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733743390299"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733743390299"}]},"ts":"1733743390299"} 2024-12-09T11:23:10,302 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T11:23:10,303 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:23:10,304 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733743390303"}]},"ts":"1733743390303"} 2024-12-09T11:23:10,306 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-09T11:23:10,306 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=693b5ab06e8c428d0553d9f2b9d9d929, ASSIGN}] 2024-12-09T11:23:10,308 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=693b5ab06e8c428d0553d9f2b9d9d929, ASSIGN 2024-12-09T11:23:10,309 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=693b5ab06e8c428d0553d9f2b9d9d929, ASSIGN; state=OFFLINE, location=2dff3a36d44f,42933,1733743388938; forceNewPlan=false, retain=false 2024-12-09T11:23:10,373 INFO [RS:1;2dff3a36d44f:42489 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C42489%2C1733743390184, suffix=, logDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184, archiveDir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/oldWALs, maxLogs=32 2024-12-09T11:23:10,374 INFO [RS:1;2dff3a36d44f:42489 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C42489%2C1733743390184.1733743390374 2024-12-09T11:23:10,381 INFO [RS:1;2dff3a36d44f:42489 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 2024-12-09T11:23:10,381 DEBUG [RS:1;2dff3a36d44f:42489 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35347:35347),(127.0.0.1/127.0.0.1:43067:43067)] 2024-12-09T11:23:10,460 INFO [2dff3a36d44f:43697 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-09T11:23:10,460 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=693b5ab06e8c428d0553d9f2b9d9d929, regionState=OPENING, regionLocation=2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:10,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=693b5ab06e8c428d0553d9f2b9d9d929, ASSIGN because future has completed 2024-12-09T11:23:10,464 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 693b5ab06e8c428d0553d9f2b9d9d929, server=2dff3a36d44f,42933,1733743388938}] 2024-12-09T11:23:10,621 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 2024-12-09T11:23:10,621 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 693b5ab06e8c428d0553d9f2b9d9d929, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:23:10,622 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:10,622 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:10,622 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:10,622 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:10,624 INFO [StoreOpener-693b5ab06e8c428d0553d9f2b9d9d929-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:10,625 INFO [StoreOpener-693b5ab06e8c428d0553d9f2b9d9d929-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 693b5ab06e8c428d0553d9f2b9d9d929 columnFamilyName info 2024-12-09T11:23:10,626 DEBUG [StoreOpener-693b5ab06e8c428d0553d9f2b9d9d929-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:10,626 INFO [StoreOpener-693b5ab06e8c428d0553d9f2b9d9d929-1 {}] regionserver.HStore(327): Store=693b5ab06e8c428d0553d9f2b9d9d929/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:23:10,626 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:10,627 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:10,628 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:10,628 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:10,628 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:10,630 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:10,632 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:23:10,633 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 693b5ab06e8c428d0553d9f2b9d9d929; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=842455, jitterRate=0.07123705744743347}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:23:10,633 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:10,634 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 693b5ab06e8c428d0553d9f2b9d9d929: Running coprocessor pre-open hook at 1733743390623Writing region info on filesystem at 1733743390623Initializing all the Stores at 1733743390624 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743390624Cleaning up temporary data from old regions at 1733743390628 (+4 ms)Running coprocessor post-open hooks at 1733743390633 (+5 ms)Region opened successfully at 1733743390633 2024-12-09T11:23:10,635 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929., pid=6, masterSystemTime=1733743390617 2024-12-09T11:23:10,637 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 2024-12-09T11:23:10,638 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 2024-12-09T11:23:10,639 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=693b5ab06e8c428d0553d9f2b9d9d929, regionState=OPEN, openSeqNum=2, regionLocation=2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:10,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 693b5ab06e8c428d0553d9f2b9d9d929, server=2dff3a36d44f,42933,1733743388938 because future has completed 2024-12-09T11:23:10,645 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T11:23:10,645 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 693b5ab06e8c428d0553d9f2b9d9d929, server=2dff3a36d44f,42933,1733743388938 in 179 msec 2024-12-09T11:23:10,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T11:23:10,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=693b5ab06e8c428d0553d9f2b9d9d929, ASSIGN in 339 msec 2024-12-09T11:23:10,650 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:23:10,650 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733743390650"}]},"ts":"1733743390650"} 2024-12-09T11:23:10,652 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-09T11:23:10,654 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:23:10,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 381 msec 2024-12-09T11:23:15,447 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T11:23:15,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:15,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:15,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:15,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:15,489 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-09T11:23:16,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T11:23:16,171 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-09T11:23:20,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43697 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:23:20,306 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-09T11:23:20,306 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-09T11:23:20,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T11:23:20,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 2024-12-09T11:23:20,334 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:20,339 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:20,344 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:20,344 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:20,344 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:23:20,345 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bf7e565{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:20,345 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a7d29fa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:20,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@19df8718{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/java.io.tmpdir/jetty-localhost-38077-hadoop-hdfs-3_4_1-tests_jar-_-any-2420254435903254579/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:20,472 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@42c8e27e{HTTP/1.1, (http/1.1)}{localhost:38077} 2024-12-09T11:23:20,472 INFO [Time-limited test {}] server.Server(415): Started @121745ms 2024-12-09T11:23:20,474 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:23:20,518 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:20,521 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:20,521 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:20,521 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:20,522 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:23:20,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76687f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:20,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31a3e3f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:20,574 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5/current/BP-2059541493-172.17.0.3-1733743387425/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:20,574 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6/current/BP-2059541493-172.17.0.3-1733743387425/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:20,596 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:23:20,599 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1343611301c2e2c with lease ID 0x62224ef78c5454b2: Processing first storage report for DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd from datanode DatanodeRegistration(127.0.0.1:36659, datanodeUuid=be79e6b3-d4cb-48c0-80b4-1f6daeeb8041, infoPort=40309, infoSecurePort=0, ipcPort=33587, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425) 2024-12-09T11:23:20,599 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1343611301c2e2c with lease ID 0x62224ef78c5454b2: from storage DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd node DatanodeRegistration(127.0.0.1:36659, datanodeUuid=be79e6b3-d4cb-48c0-80b4-1f6daeeb8041, infoPort=40309, infoSecurePort=0, ipcPort=33587, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:20,599 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1343611301c2e2c with lease ID 0x62224ef78c5454b2: Processing first storage report for DS-26ddac29-8a5a-499f-93bf-3e0977c62ead from datanode DatanodeRegistration(127.0.0.1:36659, datanodeUuid=be79e6b3-d4cb-48c0-80b4-1f6daeeb8041, infoPort=40309, infoSecurePort=0, ipcPort=33587, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425) 2024-12-09T11:23:20,599 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1343611301c2e2c with lease ID 0x62224ef78c5454b2: from storage DS-26ddac29-8a5a-499f-93bf-3e0977c62ead node DatanodeRegistration(127.0.0.1:36659, datanodeUuid=be79e6b3-d4cb-48c0-80b4-1f6daeeb8041, infoPort=40309, infoSecurePort=0, ipcPort=33587, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:20,640 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@bc5ad3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/java.io.tmpdir/jetty-localhost-35267-hadoop-hdfs-3_4_1-tests_jar-_-any-9555832327098404281/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:20,641 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@624ccad6{HTTP/1.1, (http/1.1)}{localhost:35267} 2024-12-09T11:23:20,641 INFO [Time-limited test {}] server.Server(415): Started @121914ms 2024-12-09T11:23:20,642 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:23:20,675 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:20,678 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:20,679 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:20,679 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:20,679 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:23:20,679 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67ec96b7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:20,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66d367d8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:20,742 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data8/current/BP-2059541493-172.17.0.3-1733743387425/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:20,742 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data7/current/BP-2059541493-172.17.0.3-1733743387425/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:20,767 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:23:20,769 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c5e8f8aa8139ce with lease ID 0x62224ef78c5454b3: Processing first storage report for DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd from datanode DatanodeRegistration(127.0.0.1:42609, datanodeUuid=13a707ad-da1a-4462-96aa-a191d85c78f9, infoPort=35601, infoSecurePort=0, ipcPort=44923, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425) 2024-12-09T11:23:20,770 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c5e8f8aa8139ce with lease ID 0x62224ef78c5454b3: from storage DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd node DatanodeRegistration(127.0.0.1:42609, datanodeUuid=13a707ad-da1a-4462-96aa-a191d85c78f9, infoPort=35601, infoSecurePort=0, ipcPort=44923, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T11:23:20,770 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c5e8f8aa8139ce with lease ID 0x62224ef78c5454b3: Processing first storage report for DS-ad509904-9bea-4021-b0cf-aa022bd1cd09 from datanode DatanodeRegistration(127.0.0.1:42609, datanodeUuid=13a707ad-da1a-4462-96aa-a191d85c78f9, infoPort=35601, infoSecurePort=0, ipcPort=44923, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425) 2024-12-09T11:23:20,770 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c5e8f8aa8139ce with lease ID 0x62224ef78c5454b3: from storage DS-ad509904-9bea-4021-b0cf-aa022bd1cd09 node DatanodeRegistration(127.0.0.1:42609, datanodeUuid=13a707ad-da1a-4462-96aa-a191d85c78f9, infoPort=35601, infoSecurePort=0, ipcPort=44923, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:20,809 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1aa92950{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/java.io.tmpdir/jetty-localhost-41559-hadoop-hdfs-3_4_1-tests_jar-_-any-5228721891907190420/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:20,810 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7874452e{HTTP/1.1, (http/1.1)}{localhost:41559} 2024-12-09T11:23:20,810 INFO [Time-limited test {}] server.Server(415): Started @122083ms 2024-12-09T11:23:20,811 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T11:23:20,921 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data10/current/BP-2059541493-172.17.0.3-1733743387425/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:20,921 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data9/current/BP-2059541493-172.17.0.3-1733743387425/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:20,942 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T11:23:20,944 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ec4a7a40f8601aa with lease ID 0x62224ef78c5454b4: Processing first storage report for DS-6383a02a-58c5-49df-89f5-405242871e7f from datanode DatanodeRegistration(127.0.0.1:33721, datanodeUuid=d6de4dc1-f00c-49e7-a9fc-6572b8868f79, infoPort=46795, infoSecurePort=0, ipcPort=39465, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425) 2024-12-09T11:23:20,944 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ec4a7a40f8601aa with lease ID 0x62224ef78c5454b4: from storage DS-6383a02a-58c5-49df-89f5-405242871e7f node DatanodeRegistration(127.0.0.1:33721, datanodeUuid=d6de4dc1-f00c-49e7-a9fc-6572b8868f79, infoPort=46795, infoSecurePort=0, ipcPort=39465, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:20,944 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ec4a7a40f8601aa with lease ID 0x62224ef78c5454b4: Processing first storage report for DS-b10192d7-c1d5-4b8a-a4c4-352203961757 from datanode DatanodeRegistration(127.0.0.1:33721, datanodeUuid=d6de4dc1-f00c-49e7-a9fc-6572b8868f79, infoPort=46795, infoSecurePort=0, ipcPort=39465, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425) 2024-12-09T11:23:20,944 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ec4a7a40f8601aa with lease ID 0x62224ef78c5454b4: from storage DS-b10192d7-c1d5-4b8a-a4c4-352203961757 node DatanodeRegistration(127.0.0.1:33721, datanodeUuid=d6de4dc1-f00c-49e7-a9fc-6572b8868f79, infoPort=46795, infoSecurePort=0, ipcPort=39465, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:21,032 WARN [ResponseProcessor for block BP-2059541493-172.17.0.3-1733743387425:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2059541493-172.17.0.3-1733743387425:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:21,032 WARN [ResponseProcessor for block BP-2059541493-172.17.0.3-1733743387425:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2059541493-172.17.0.3-1733743387425:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:21,033 WARN [ResponseProcessor for block BP-2059541493-172.17.0.3-1733743387425:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2059541493-172.17.0.3-1733743387425:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:21,033 WARN [ResponseProcessor for block BP-2059541493-172.17.0.3-1733743387425:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2059541493-172.17.0.3-1733743387425:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-2059541493-172.17.0.3-1733743387425:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:21,033 WARN [DataStreamer for file /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 block BP-2059541493-172.17.0.3-1733743387425:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK], DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 2024-12-09T11:23:21,033 WARN [DataStreamer for file /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823/2dff3a36d44f%2C43697%2C1733743388823.1733743389176 block BP-2059541493-172.17.0.3-1733743387425:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK], DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 
2024-12-09T11:23:21,033 WARN [DataStreamer for file /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta block BP-2059541493-172.17.0.3-1733743387425:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK], DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 2024-12-09T11:23:21,033 WARN [DataStreamer for file /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 block BP-2059541493-172.17.0.3-1733743387425:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 2024-12-09T11:23:21,034 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_757929507_22 at /127.0.0.1:56674 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:37587:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56674 dst: /127.0.0.1:37587 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:21,034 WARN [PacketResponder: BP-2059541493-172.17.0.3-1733743387425:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37587] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:21,035 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_757929507_22 at /127.0.0.1:33562 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:35777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33562 dst: /127.0.0.1:35777 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:21,035 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_800564294_22 at /127.0.0.1:56612 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37587:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56612 dst: /127.0.0.1:37587 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:21,034 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:56648 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37587:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56648 dst: /127.0.0.1:37587 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:21,035 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:33530 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33530 dst: /127.0.0.1:35777 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:23:21,035 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:56642 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37587:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56642 dst: /127.0.0.1:37587 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:21,036 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:33546 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33546 dst: /127.0.0.1:35777 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:21,036 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_800564294_22 at /127.0.0.1:33488 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35777:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33488 dst: /127.0.0.1:35777 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:23:21,037 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@51f3eae5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:21,038 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fbfcb3a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:21,038 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:21,038 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b8a0b06{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:21,039 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5192cae1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:21,040 WARN [BP-2059541493-172.17.0.3-1733743387425 heartbeating to localhost/127.0.0.1:42671 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:23:21,040 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T11:23:21,040 WARN [BP-2059541493-172.17.0.3-1733743387425 heartbeating to localhost/127.0.0.1:42671 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2059541493-172.17.0.3-1733743387425 (Datanode Uuid 2246728c-2716-42f5-9852-b7b26aeb1f07) service to localhost/127.0.0.1:42671 2024-12-09T11:23:21,040 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:23:21,040 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data3/current/BP-2059541493-172.17.0.3-1733743387425 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:21,041 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data4/current/BP-2059541493-172.17.0.3-1733743387425 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:21,041 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:23:21,041 WARN [DataStreamer for file /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 block BP-2059541493-172.17.0.3-1733743387425:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:21,041 WARN [DataStreamer for file /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta block BP-2059541493-172.17.0.3-1733743387425:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:21,047 WARN [DataStreamer for file /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 block BP-2059541493-172.17.0.3-1733743387425:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:21,050 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@567fcb2a {}] datanode.DataXceiver(331): 127.0.0.1:35777:DataXceiver error processing unknown operation src: /127.0.0.1:49822 dst: /127.0.0.1:35777 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:21,051 WARN [DataStreamer for file /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823/2dff3a36d44f%2C43697%2C1733743388823.1733743389176 block BP-2059541493-172.17.0.3-1733743387425:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:21,056 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3130a42a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:21,057 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@341c1108{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:21,057 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:21,057 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5da4e9e3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:21,057 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@581c9bfb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:21,059 WARN [BP-2059541493-172.17.0.3-1733743387425 heartbeating to localhost/127.0.0.1:42671 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:23:21,059 WARN [BP-2059541493-172.17.0.3-1733743387425 heartbeating to localhost/127.0.0.1:42671 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2059541493-172.17.0.3-1733743387425 (Datanode Uuid afe86b40-b1a5-4ed5-9b65-30addde00ac5) service to localhost/127.0.0.1:42671 2024-12-09T11:23:21,060 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:23:21,061 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data2/current/BP-2059541493-172.17.0.3-1733743387425 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:21,062 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:23:21,063 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:23:21,062 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data1/current/BP-2059541493-172.17.0.3-1733743387425 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:21,065 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929., hostname=2dff3a36d44f,42933,1733743388938, seqNum=2] 2024-12-09T11:23:21,066 ERROR [FSHLog-0-hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e-prefix:2dff3a36d44f,42933,1733743388938 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:21,066 WARN [FSHLog-0-hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e-prefix:2dff3a36d44f,42933,1733743388938 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:21,067 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:21,067 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C42933%2C1733743388938:(num 1733743389516) roll requested 2024-12-09T11:23:21,067 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C42933%2C1733743388938.1733743401067 2024-12-09T11:23:21,077 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:21,077 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:21,077 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:21,077 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:21,077 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:21,077 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743401067 2024-12-09T11:23:21,077 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:21,078 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:21,079 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35601:35601),(127.0.0.1/127.0.0.1:46795:46795)] 2024-12-09T11:23:21,079 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-09T11:23:21,079 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 is not closed yet, will try archiving it next time 2024-12-09T11:23:21,079 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-09T11:23:21,079 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 2024-12-09T11:23:21,082 WARN [IPC Server handler 3 on default port 42671 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-12-09T11:23:21,085 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 after 5ms 2024-12-09T11:23:21,132 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:22,239 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
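RecoverLeaseFSUtils above retries lease recovery on the WAL that could not be closed (attempt=0 here, attempt=1 roughly four seconds later), delegating to the HDFS client's recoverLease(). A minimal sketch of that retry pattern, assuming an HDFS DistributedFileSystem and a hypothetical file path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverLeaseExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical path; the log above recovers a file under .../WALs/...
    Path wal = new Path("hdfs://localhost:42671/user/jenkins/example-wal");
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("lease recovery only applies to HDFS");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // recoverLease() returns false while the NameNode is still recovering the
    // lease asynchronously, so callers poll with a pause between attempts.
    boolean recovered = false;
    for (int attempt = 0; attempt < 10 && !recovered; attempt++) {
      recovered = dfs.recoverLease(wal);
      if (!recovered) {
        Thread.sleep(4000L); // roughly the spacing between attempt=0 and attempt=1 above
      }
    }
    System.out.println("lease recovered: " + recovered);
  }
}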
2024-12-09T11:23:23,079 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:23,080 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743401067 2024-12-09T11:23:23,081 WARN [ResponseProcessor for block BP-2059541493-172.17.0.3-1733743387425:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2059541493-172.17.0.3-1733743387425:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:23,081 WARN [DataStreamer for file /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743401067 block BP-2059541493-172.17.0.3-1733743387425:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK], DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:23,082 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:53702 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:42609:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53702 dst: /127.0.0.1:42609 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:23,082 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:34602 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33721:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34602 dst: /127.0.0.1:33721 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:23:23,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@bc5ad3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:23,085 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@624ccad6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:23,085 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:23,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31a3e3f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:23,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76687f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:23,087 WARN [BP-2059541493-172.17.0.3-1733743387425 heartbeating to localhost/127.0.0.1:42671 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:23:23,087 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T11:23:23,087 WARN [BP-2059541493-172.17.0.3-1733743387425 heartbeating to localhost/127.0.0.1:42671 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2059541493-172.17.0.3-1733743387425 (Datanode Uuid 13a707ad-da1a-4462-96aa-a191d85c78f9) service to localhost/127.0.0.1:42671 2024-12-09T11:23:23,087 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:23:23,088 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data7/current/BP-2059541493-172.17.0.3-1733743387425 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:23,088 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data8/current/BP-2059541493-172.17.0.3-1733743387425 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:23,089 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:23:23,132 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:24,239 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:25,079 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:25,080 WARN [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]] 2024-12-09T11:23:25,080 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C42933%2C1733743388938:(num 1733743401067) roll requested 2024-12-09T11:23:25,080 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C42933%2C1733743388938.1733743405080 2024-12-09T11:23:25,083 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:25,083 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:25,083 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741839_1021 2024-12-09T11:23:25,086 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 after 4007ms 2024-12-09T11:23:25,086 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:25,093 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:23:25,094 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:25,094 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:25,094 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:25,094 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:25,094 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:25,095 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743401067 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743405080 2024-12-09T11:23:25,095 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46795:46795),(127.0.0.1/127.0.0.1:40309:40309)] 2024-12-09T11:23:25,096 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 is not closed yet, will try archiving it next time 2024-12-09T11:23:25,096 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743401067 is not closed yet, will try archiving it next time 2024-12-09T11:23:25,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33721 is added to blk_1073741838_1020 (size=2431) 2024-12-09T11:23:25,133 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:25,497 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 is not closed yet, will try archiving it next time 2024-12-09T11:23:25,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741838_1020 (size=2431) 2024-12-09T11:23:26,239 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:27,096 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:27,097 WARN [ResponseProcessor for block BP-2059541493-172.17.0.3-1733743387425:blk_1073741840_1022 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2059541493-172.17.0.3-1733743387425:blk_1073741840_1022 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:27,098 WARN [DataStreamer for file /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743405080 block BP-2059541493-172.17.0.3-1733743387425:blk_1073741840_1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 2024-12-09T11:23:27,098 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:34620 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:33721:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34620 dst: /127.0.0.1:33721 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:23:27,098 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:56304 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56304 dst: /127.0.0.1:36659 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:27,100 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1aa92950{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:27,100 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7874452e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:27,100 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:27,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66d367d8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:27,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67ec96b7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:27,102 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:23:27,102 WARN [BP-2059541493-172.17.0.3-1733743387425 heartbeating to localhost/127.0.0.1:42671 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:23:27,102 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:23:27,102 WARN [BP-2059541493-172.17.0.3-1733743387425 heartbeating to localhost/127.0.0.1:42671 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2059541493-172.17.0.3-1733743387425 (Datanode Uuid d6de4dc1-f00c-49e7-a9fc-6572b8868f79) service to localhost/127.0.0.1:42671 2024-12-09T11:23:27,103 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data9/current/BP-2059541493-172.17.0.3-1733743387425 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:27,103 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data10/current/BP-2059541493-172.17.0.3-1733743387425 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:27,103 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:23:27,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42933 {}] regionserver.HRegion(8855): Flush requested on 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:27,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 693b5ab06e8c428d0553d9f2b9d9d929 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T11:23:27,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/d350547985404429a362401affd56513 is 1080, key is row0002/info:/1733743403090/Put/seqid=0 2024-12-09T11:23:27,132 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37587 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
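The flush above is triggered automatically once the memstore of region 693b5ab06e8c428d0553d9f2b9d9d929 crosses its size threshold; the same flush can also be requested explicitly through the client API. A minimal sketch, assuming the test table name from this run; this is an illustration of the public Admin.flush() entry point, not something the test itself does:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Table name taken from the log above; any table works the same way.
      TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
      // Asks every region of the table to flush its memstore to a new HFile,
      // the operation MemStoreFlusher.0 performs above.
      admin.flush(table);
    }
  }
}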
2024-12-09T11:23:27,132 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:56338 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741841_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741841_1024 to mirror 127.0.0.1:37587 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:27,133 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 2024-12-09T11:23:27,133 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741841_1024 2024-12-09T11:23:27,133 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:56338 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741841_1024] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T11:23:27,133 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:56338 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741841_1024] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56338 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:27,133 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK] 2024-12-09T11:23:27,133 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:27,134 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:27,134 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 
2024-12-09T11:23:27,134 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741842_1025 2024-12-09T11:23:27,135 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:27,136 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:27,136 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK], DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:27,136 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741843_1026 2024-12-09T11:23:27,137 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:27,138 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35777 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
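The repeated Abandoning/Excluding cycle above is the DFS client trying to rebuild a two-replica write pipeline while most datanodes in the mini cluster are already stopped. How aggressively the client insists on replacing failed datanodes is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings; a minimal sketch of relaxing them for a small test cluster (the values here are illustrative and are not what this test run configured):

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoverySettings {
  public static Configuration relaxedPipelinePolicy() {
    Configuration conf = new Configuration();
    // Keep failure handling enabled, but allow the writer to continue with
    // fewer replicas when no replacement datanode can be found.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    conf.set("dfs.client.block.write.replace-datanode-on-failure.best-effort", "true");
    return conf;
  }
}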
2024-12-09T11:23:27,138 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:56340 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741844_1027 to mirror 127.0.0.1:35777 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:27,139 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:27,139 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741844_1027 2024-12-09T11:23:27,139 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:56340 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T11:23:27,139 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:56340 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56340 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:27,139 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:27,140 WARN [IPC Server handler 2 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:23:27,140 WARN [IPC Server handler 2 on default port 42671 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:23:27,140 WARN [IPC Server handler 2 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:23:27,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741845_1028 (size=10347) 2024-12-09T11:23:27,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/d350547985404429a362401affd56513 2024-12-09T11:23:27,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/d350547985404429a362401affd56513 as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/d350547985404429a362401affd56513 2024-12-09T11:23:27,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/d350547985404429a362401affd56513, entries=5, sequenceid=11, filesize=10.1 K 2024-12-09T11:23:27,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 693b5ab06e8c428d0553d9f2b9d9d929 in 453ms, sequenceid=11, compaction requested=false 2024-12-09T11:23:27,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 693b5ab06e8c428d0553d9f2b9d9d929: 2024-12-09T11:23:27,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42933 {}] regionserver.HRegion(8855): Flush requested on 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:27,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 693b5ab06e8c428d0553d9f2b9d9d929 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-09T11:23:27,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/30b393f7a82d4c85ad377a4be3a3615b is 1080, key is row0007/info:/1733743407112/Put/seqid=0 2024-12-09T11:23:27,741 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:27,742 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:27,742 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741846_1029 2024-12-09T11:23:27,743 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:27,745 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:27,745 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:27,745 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741847_1030 2024-12-09T11:23:27,746 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:27,748 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33721 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:27,748 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:56364 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741848_1031 to mirror 127.0.0.1:33721 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:27,749 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 2024-12-09T11:23:27,749 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741848_1031 2024-12-09T11:23:27,749 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:56364 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T11:23:27,750 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:56364 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56364 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:27,750 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:27,751 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:27,751 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 2024-12-09T11:23:27,751 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741849_1032 2024-12-09T11:23:27,752 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK] 2024-12-09T11:23:27,752 WARN [IPC Server handler 4 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:23:27,752 WARN [IPC Server handler 4 on default port 42671 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:23:27,753 WARN [IPC Server handler 4 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:23:27,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741850_1033 (size=12506) 2024-12-09T11:23:28,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/30b393f7a82d4c85ad377a4be3a3615b 2024-12-09T11:23:28,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/30b393f7a82d4c85ad377a4be3a3615b as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/30b393f7a82d4c85ad377a4be3a3615b 2024-12-09T11:23:28,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/30b393f7a82d4c85ad377a4be3a3615b, entries=7, sequenceid=24, filesize=12.2 K 2024-12-09T11:23:28,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 693b5ab06e8c428d0553d9f2b9d9d929 in 447ms, sequenceid=24, compaction requested=false 2024-12-09T11:23:28,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 693b5ab06e8c428d0553d9f2b9d9d929: 2024-12-09T11:23:28,181 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-09T11:23:28,181 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:23:28,182 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/30b393f7a82d4c85ad377a4be3a3615b because midkey is the same as first or last row 2024-12-09T11:23:28,240 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:29,096 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:29,096 WARN [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]] 2024-12-09T11:23:29,096 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C42933%2C1733743388938:(num 1733743405080) roll requested 2024-12-09T11:23:29,097 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C42933%2C1733743388938.1733743409097 2024-12-09T11:23:29,101 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:29,101 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:29,101 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741851_1034 2024-12-09T11:23:29,102 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:29,103 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:29,103 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 2024-12-09T11:23:29,103 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741852_1035 2024-12-09T11:23:29,103 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:29,105 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:29,105 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:29,105 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741853_1036 2024-12-09T11:23:29,105 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:29,108 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37587 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:29,108 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 2024-12-09T11:23:29,108 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40256 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741854_1037 to mirror 127.0.0.1:37587 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:29,108 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741854_1037 2024-12-09T11:23:29,108 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40256 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T11:23:29,108 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40256 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40256 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:29,108 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK] 2024-12-09T11:23:29,109 WARN [IPC Server handler 1 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:23:29,109 WARN [IPC Server handler 1 on default port 42671 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:23:29,109 WARN [IPC Server handler 1 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:23:29,112 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:29,112 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:29,112 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:29,112 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:29,112 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:29,112 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743405080 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743409097 2024-12-09T11:23:29,113 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40309:40309)] 2024-12-09T11:23:29,113 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 is not closed yet, will try archiving it next time 
2024-12-09T11:23:29,113 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743405080 is not closed yet, will try archiving it next time 2024-12-09T11:23:29,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741840_1023 (size=25992) 2024-12-09T11:23:29,114 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743401067 to hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/oldWALs/2dff3a36d44f%2C42933%2C1733743388938.1733743401067 2024-12-09T11:23:29,115 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 is not closed yet, will try archiving it next time 2024-12-09T11:23:29,133 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:29,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42933 {}] regionserver.HRegion(8855): Flush requested on 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:29,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 693b5ab06e8c428d0553d9f2b9d9d929 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T11:23:29,167 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/29a2e61394eb4a64bd28a6d259dcae2a is 1079, key is tmprow/info:/1733743409160/Put/seqid=0 2024-12-09T11:23:29,168 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:29,168 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 2024-12-09T11:23:29,168 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741856_1039 2024-12-09T11:23:29,169 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:29,170 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:29,170 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK], DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:29,170 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741857_1040 2024-12-09T11:23:29,171 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:29,173 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35777 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:29,173 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40278 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741858_1041 to mirror 127.0.0.1:35777 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:29,173 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:29,173 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741858_1041 2024-12-09T11:23:29,173 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40278 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T11:23:29,173 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40278 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40278 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:29,174 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:29,176 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37587 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:29,176 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40282 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741859_1042 to mirror 127.0.0.1:37587 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:29,176 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 2024-12-09T11:23:29,176 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741859_1042 2024-12-09T11:23:29,176 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40282 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T11:23:29,176 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40282 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40282 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:23:29,176 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK] 2024-12-09T11:23:29,177 WARN [IPC Server handler 2 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:23:29,177 WARN [IPC Server handler 2 on default port 42671 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:23:29,177 WARN [IPC Server handler 2 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:23:29,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741860_1043 (size=6027) 2024-12-09T11:23:29,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/29a2e61394eb4a64bd28a6d259dcae2a 2024-12-09T11:23:29,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/29a2e61394eb4a64bd28a6d259dcae2a as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/29a2e61394eb4a64bd28a6d259dcae2a 2024-12-09T11:23:29,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/29a2e61394eb4a64bd28a6d259dcae2a, entries=1, sequenceid=34, filesize=5.9 K 2024-12-09T11:23:29,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 693b5ab06e8c428d0553d9f2b9d9d929 in 432ms, sequenceid=34, compaction requested=true 2024-12-09T11:23:29,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 693b5ab06e8c428d0553d9f2b9d9d929: 2024-12-09T11:23:29,594 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-09T11:23:29,594 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:23:29,594 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/30b393f7a82d4c85ad377a4be3a3615b because midkey is the same as first or last row 2024-12-09T11:23:29,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 693b5ab06e8c428d0553d9f2b9d9d929:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T11:23:29,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:23:29,595 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T11:23:29,596 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T11:23:29,596 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.HStore(1541): 693b5ab06e8c428d0553d9f2b9d9d929/info is initiating minor compaction (all files) 2024-12-09T11:23:29,596 INFO [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 693b5ab06e8c428d0553d9f2b9d9d929/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 
2024-12-09T11:23:29,596 INFO [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/d350547985404429a362401affd56513, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/30b393f7a82d4c85ad377a4be3a3615b, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/29a2e61394eb4a64bd28a6d259dcae2a] into tmpdir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp, totalSize=28.2 K 2024-12-09T11:23:29,597 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] compactions.Compactor(225): Compacting d350547985404429a362401affd56513, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733743403090 2024-12-09T11:23:29,597 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] compactions.Compactor(225): Compacting 30b393f7a82d4c85ad377a4be3a3615b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733743407112 2024-12-09T11:23:29,598 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] compactions.Compactor(225): Compacting 29a2e61394eb4a64bd28a6d259dcae2a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733743409160 2024-12-09T11:23:29,599 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1339f3dd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36659, datanodeUuid=be79e6b3-d4cb-48c0-80b4-1f6daeeb8041, infoPort=40309, infoSecurePort=0, ipcPort=33587, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425):Failed to transfer BP-2059541493-172.17.0.3-1733743387425:blk_1073741845_1028 to 127.0.0.1:35777 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:23:29,599 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1dfe1fca[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36659, datanodeUuid=be79e6b3-d4cb-48c0-80b4-1f6daeeb8041, infoPort=40309, infoSecurePort=0, ipcPort=33587, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425):Failed to transfer BP-2059541493-172.17.0.3-1733743387425:blk_1073741850_1033 to 127.0.0.1:42609 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:29,613 INFO [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 693b5ab06e8c428d0553d9f2b9d9d929#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:23:29,613 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/7c4a943e632f4333b7fab3417d58d93c is 1080, key is row0002/info:/1733743403090/Put/seqid=0 2024-12-09T11:23:29,615 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:29,616 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK], DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 
2024-12-09T11:23:29,616 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741861_1044 2024-12-09T11:23:29,616 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK] 2024-12-09T11:23:29,618 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:29,618 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK], DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:29,618 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741862_1045 2024-12-09T11:23:29,618 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:29,619 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:29,620 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK], DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:29,620 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741863_1046 2024-12-09T11:23:29,620 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:29,621 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:29,622 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 
2024-12-09T11:23:29,622 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741864_1047 2024-12-09T11:23:29,622 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:29,623 WARN [IPC Server handler 4 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:23:29,623 WARN [IPC Server handler 4 on default port 42671 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:23:29,623 WARN [IPC Server handler 4 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:23:29,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741865_1048 (size=17994) 2024-12-09T11:23:30,035 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/7c4a943e632f4333b7fab3417d58d93c as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/7c4a943e632f4333b7fab3417d58d93c 2024-12-09T11:23:30,042 INFO [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 693b5ab06e8c428d0553d9f2b9d9d929/info of 693b5ab06e8c428d0553d9f2b9d9d929 into 7c4a943e632f4333b7fab3417d58d93c(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T11:23:30,042 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 693b5ab06e8c428d0553d9f2b9d9d929: 2024-12-09T11:23:30,042 INFO [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929., storeName=693b5ab06e8c428d0553d9f2b9d9d929/info, priority=13, startTime=1733743409594; duration=0sec 2024-12-09T11:23:30,042 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-09T11:23:30,042 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:23:30,042 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/7c4a943e632f4333b7fab3417d58d93c because midkey is the same as first or last row 2024-12-09T11:23:30,042 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-09T11:23:30,042 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:23:30,042 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/7c4a943e632f4333b7fab3417d58d93c because midkey is the same as first or last row 2024-12-09T11:23:30,042 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-09T11:23:30,042 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:23:30,042 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/7c4a943e632f4333b7fab3417d58d93c because midkey is the same as first or last row 2024-12-09T11:23:30,042 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:23:30,043 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 693b5ab06e8c428d0553d9f2b9d9d929:info 2024-12-09T11:23:30,240 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:30,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42933 {}] regionserver.HRegion(8855): Flush requested on 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:30,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 693b5ab06e8c428d0553d9f2b9d9d929 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T11:23:30,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/449793d6b5cb41808a115a847e2b6ecc is 1079, key is tmprow/info:/1733743410579/Put/seqid=0 2024-12-09T11:23:30,588 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35777 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:30,588 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40320 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741866_1049 to mirror 127.0.0.1:35777 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:30,588 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:30,588 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741866_1049 2024-12-09T11:23:30,588 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40320 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T11:23:30,588 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40320 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40320 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:30,589 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:30,591 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33721 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:30,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40332 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741867_1050 to mirror 127.0.0.1:33721 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:30,591 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 2024-12-09T11:23:30,591 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741867_1050 2024-12-09T11:23:30,591 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40332 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T11:23:30,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40332 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40332 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:30,591 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:30,592 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:30,592 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:30,592 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741868_1051 2024-12-09T11:23:30,593 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:30,594 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:30,594 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 2024-12-09T11:23:30,594 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741869_1052 2024-12-09T11:23:30,595 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK] 2024-12-09T11:23:30,595 WARN [IPC Server handler 1 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:23:30,595 WARN [IPC Server handler 1 on default port 42671 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:23:30,595 WARN [IPC Server handler 1 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:23:30,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741870_1053 (size=6027) 2024-12-09T11:23:31,001 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/449793d6b5cb41808a115a847e2b6ecc 2024-12-09T11:23:31,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/449793d6b5cb41808a115a847e2b6ecc as 
hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/449793d6b5cb41808a115a847e2b6ecc 2024-12-09T11:23:31,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/449793d6b5cb41808a115a847e2b6ecc, entries=1, sequenceid=45, filesize=5.9 K 2024-12-09T11:23:31,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 693b5ab06e8c428d0553d9f2b9d9d929 in 434ms, sequenceid=45, compaction requested=false 2024-12-09T11:23:31,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 693b5ab06e8c428d0553d9f2b9d9d929: 2024-12-09T11:23:31,014 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-09T11:23:31,014 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:23:31,015 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/7c4a943e632f4333b7fab3417d58d93c because midkey is the same as first or last row 2024-12-09T11:23:31,114 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:31,114 WARN [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]] 2024-12-09T11:23:31,115 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C42933%2C1733743388938:(num 1733743409097) roll requested 2024-12-09T11:23:31,115 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C42933%2C1733743388938.1733743411115 2024-12-09T11:23:31,118 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:31,118 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK], DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:31,118 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741871_1054 2024-12-09T11:23:31,118 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:31,120 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33721 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:31,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40356 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741872_1055 to mirror 127.0.0.1:33721 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:31,121 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 2024-12-09T11:23:31,121 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741872_1055 2024-12-09T11:23:31,121 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40356 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T11:23:31,121 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40356 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40356 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:31,121 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:31,122 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:31,122 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:31,123 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741873_1056 2024-12-09T11:23:31,123 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:31,125 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37587 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:31,125 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40360 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741874_1057 to mirror 127.0.0.1:37587 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:31,125 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 2024-12-09T11:23:31,125 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741874_1057 2024-12-09T11:23:31,125 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40360 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T11:23:31,125 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40360 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40360 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:23:31,126 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK] 2024-12-09T11:23:31,126 WARN [IPC Server handler 0 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:23:31,126 WARN [IPC Server handler 0 on default port 42671 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:23:31,126 WARN [IPC Server handler 0 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:23:31,129 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:31,129 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:31,129 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:31,129 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:31,129 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:31,129 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743409097 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743411115 2024-12-09T11:23:31,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741855_1038 (size=13591) 2024-12-09T11:23:31,132 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 is not closed yet, will try archiving it next time 2024-12-09T11:23:31,132 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743405080 to hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/oldWALs/2dff3a36d44f%2C42933%2C1733743388938.1733743405080 2024-12-09T11:23:31,132 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40309:40309)] 2024-12-09T11:23:31,132 DEBUG 
[regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 is not closed yet, will try archiving it next time 2024-12-09T11:23:31,134 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:32,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42933 {}] regionserver.HRegion(8855): Flush requested on 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:32,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 693b5ab06e8c428d0553d9f2b9d9d929 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T11:23:32,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/ae13a32aadab4c1a89256672565e1a84 is 1079, key is tmprow/info:/1733743412001/Put/seqid=0 2024-12-09T11:23:32,010 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:32,010 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK], DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 
2024-12-09T11:23:32,010 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741876_1059 2024-12-09T11:23:32,011 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK] 2024-12-09T11:23:32,013 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35777 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:32,013 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40378 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741877_1060 to mirror 127.0.0.1:35777 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:32,014 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 
2024-12-09T11:23:32,014 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741877_1060 2024-12-09T11:23:32,014 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40378 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T11:23:32,014 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40378 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40378 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:32,014 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:32,017 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42609 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:32,017 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40392 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741878_1061 to mirror 127.0.0.1:42609 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:32,017 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:32,018 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741878_1061 2024-12-09T11:23:32,018 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40392 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T11:23:32,018 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40392 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40392 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:32,018 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:32,020 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:32,020 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 
2024-12-09T11:23:32,020 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741879_1062 2024-12-09T11:23:32,020 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:32,021 WARN [IPC Server handler 4 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:23:32,021 WARN [IPC Server handler 4 on default port 42671 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:23:32,021 WARN [IPC Server handler 4 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:23:32,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741880_1063 (size=6027) 2024-12-09T11:23:32,240 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:32,425 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/ae13a32aadab4c1a89256672565e1a84 2024-12-09T11:23:32,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/ae13a32aadab4c1a89256672565e1a84 as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/ae13a32aadab4c1a89256672565e1a84 2024-12-09T11:23:32,437 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/ae13a32aadab4c1a89256672565e1a84, entries=1, sequenceid=55, filesize=5.9 K 2024-12-09T11:23:32,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 693b5ab06e8c428d0553d9f2b9d9d929 in 436ms, sequenceid=55, compaction requested=true 2024-12-09T11:23:32,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 693b5ab06e8c428d0553d9f2b9d9d929: 2024-12-09T11:23:32,438 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-12-09T11:23:32,438 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:23:32,438 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/7c4a943e632f4333b7fab3417d58d93c because midkey is the same as first or last row 2024-12-09T11:23:32,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 693b5ab06e8c428d0553d9f2b9d9d929:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T11:23:32,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:23:32,438 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T11:23:32,439 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T11:23:32,439 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.HStore(1541): 693b5ab06e8c428d0553d9f2b9d9d929/info is initiating minor compaction (all files) 2024-12-09T11:23:32,439 INFO [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
693b5ab06e8c428d0553d9f2b9d9d929/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 2024-12-09T11:23:32,440 INFO [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/7c4a943e632f4333b7fab3417d58d93c, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/449793d6b5cb41808a115a847e2b6ecc, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/ae13a32aadab4c1a89256672565e1a84] into tmpdir=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp, totalSize=29.3 K 2024-12-09T11:23:32,440 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7c4a943e632f4333b7fab3417d58d93c, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733743403090 2024-12-09T11:23:32,440 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] compactions.Compactor(225): Compacting 449793d6b5cb41808a115a847e2b6ecc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733743410579 2024-12-09T11:23:32,441 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] compactions.Compactor(225): Compacting ae13a32aadab4c1a89256672565e1a84, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733743412001 2024-12-09T11:23:32,457 INFO [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 693b5ab06e8c428d0553d9f2b9d9d929#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:23:32,458 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/252acc73b6ae4f16bfe5d5817ed72720 is 1080, key is row0002/info:/1733743403090/Put/seqid=0 2024-12-09T11:23:32,460 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:32,460 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 2024-12-09T11:23:32,461 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741881_1064 2024-12-09T11:23:32,461 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:32,462 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:32,462 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:32,462 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741882_1065 2024-12-09T11:23:32,463 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:32,464 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:32,465 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:32,465 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741883_1066 2024-12-09T11:23:32,465 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:32,467 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37587 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:32,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40420 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741884_1067 to mirror 127.0.0.1:37587 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:32,468 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]) is bad. 2024-12-09T11:23:32,468 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40420 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T11:23:32,468 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741884_1067 2024-12-09T11:23:32,468 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:40420 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40420 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:23:32,469 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37587,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK] 2024-12-09T11:23:32,469 WARN [IPC Server handler 1 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T11:23:32,469 WARN [IPC Server handler 1 on default port 42671 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T11:23:32,469 WARN [IPC Server handler 1 on default port 42671 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T11:23:32,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741885_1068 (size=18097) 2024-12-09T11:23:32,599 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1339f3dd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36659, datanodeUuid=be79e6b3-d4cb-48c0-80b4-1f6daeeb8041, infoPort=40309, infoSecurePort=0, ipcPort=33587, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425):Failed to transfer BP-2059541493-172.17.0.3-1733743387425:blk_1073741860_1043 to 127.0.0.1:42609 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:23:32,599 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1dfe1fca[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36659, datanodeUuid=be79e6b3-d4cb-48c0-80b4-1f6daeeb8041, infoPort=40309, infoSecurePort=0, ipcPort=33587, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425):Failed to transfer BP-2059541493-172.17.0.3-1733743387425:blk_1073741840_1023 to 127.0.0.1:42609 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:32,893 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/252acc73b6ae4f16bfe5d5817ed72720 as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/252acc73b6ae4f16bfe5d5817ed72720 2024-12-09T11:23:32,917 INFO [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 693b5ab06e8c428d0553d9f2b9d9d929/info of 693b5ab06e8c428d0553d9f2b9d9d929 into 252acc73b6ae4f16bfe5d5817ed72720(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T11:23:32,917 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 693b5ab06e8c428d0553d9f2b9d9d929: 2024-12-09T11:23:32,917 INFO [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929., storeName=693b5ab06e8c428d0553d9f2b9d9d929/info, priority=13, startTime=1733743412438; duration=0sec 2024-12-09T11:23:32,918 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-09T11:23:32,918 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:23:32,918 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/252acc73b6ae4f16bfe5d5817ed72720 because midkey is the same as first or last row 2024-12-09T11:23:32,918 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-09T11:23:32,918 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:23:32,918 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/252acc73b6ae4f16bfe5d5817ed72720 because midkey is the same as first or last row 2024-12-09T11:23:32,918 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-09T11:23:32,918 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:23:32,918 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/252acc73b6ae4f16bfe5d5817ed72720 because midkey is the same as first or last row 2024-12-09T11:23:32,918 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:23:32,919 DEBUG [RS:0;2dff3a36d44f:42933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 693b5ab06e8c428d0553d9f2b9d9d929:info 2024-12-09T11:23:33,132 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:33,133 WARN [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-09T11:23:33,134 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:33,253 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:33,263 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:33,285 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:33,285 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:33,285 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:23:33,287 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5dd34496{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:33,288 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6409ef82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:33,485 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@73c11417{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/java.io.tmpdir/jetty-localhost-35991-hadoop-hdfs-3_4_1-tests_jar-_-any-2050248459786043277/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:33,491 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@68fc6dd2{HTTP/1.1, (http/1.1)}{localhost:35991} 2024-12-09T11:23:33,492 INFO [Time-limited test {}] server.Server(415): Started @134765ms 2024-12-09T11:23:33,493 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:23:33,605 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1339f3dd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36659, datanodeUuid=be79e6b3-d4cb-48c0-80b4-1f6daeeb8041, infoPort=40309, infoSecurePort=0, ipcPort=33587, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425):Failed to transfer BP-2059541493-172.17.0.3-1733743387425:blk_1073741870_1053 to 127.0.0.1:35777 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:33,605 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1dfe1fca[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36659, datanodeUuid=be79e6b3-d4cb-48c0-80b4-1f6daeeb8041, infoPort=40309, infoSecurePort=0, ipcPort=33587, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425):Failed to transfer BP-2059541493-172.17.0.3-1733743387425:blk_1073741865_1048 to 127.0.0.1:33721 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:33,651 WARN [Thread-991 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:23:33,664 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6b7bdd9267c32fbb with lease ID 0x62224ef78c5454b5: from storage DS-185b7880-58c5-4218-ad2f-2db231ca8105 node DatanodeRegistration(127.0.0.1:34565, datanodeUuid=2246728c-2716-42f5-9852-b7b26aeb1f07, infoPort=40743, infoSecurePort=0, ipcPort=46787, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:33,666 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6b7bdd9267c32fbb with lease ID 0x62224ef78c5454b5: from storage DS-c0f2d9c4-ea98-4623-83c1-2b07a57697bb node DatanodeRegistration(127.0.0.1:34565, datanodeUuid=2246728c-2716-42f5-9852-b7b26aeb1f07, infoPort=40743, infoSecurePort=0, ipcPort=46787, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:34,241 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:35,133 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:35,134 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:35,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741855_1038 (size=13591) 2024-12-09T11:23:35,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741880_1063 (size=6027) 2024-12-09T11:23:36,241 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:36,599 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1dfe1fca[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36659, datanodeUuid=be79e6b3-d4cb-48c0-80b4-1f6daeeb8041, infoPort=40309, infoSecurePort=0, ipcPort=33587, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425):Failed to transfer BP-2059541493-172.17.0.3-1733743387425:blk_1073741885_1068 to 127.0.0.1:33721 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:37,133 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:37,135 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:38,242 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:38,764 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:23:39,134 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:39,135 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:39,289 ERROR [FSHLog-0-hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData-prefix:2dff3a36d44f,43697,1733743388823 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:39,289 WARN [FSHLog-0-hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData-prefix:2dff3a36d44f,43697,1733743388823 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:39,289 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C43697%2C1733743388823:(num 1733743389176) roll requested 2024-12-09T11:23:39,290 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C43697%2C1733743388823.1733743419290 2024-12-09T11:23:39,295 WARN [Thread-1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42609 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:39,295 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_800564294_22 at /127.0.0.1:57742 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data4]'}, localName='127.0.0.1:34565', datanodeUuid='2246728c-2716-42f5-9852-b7b26aeb1f07', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741886_1069 to mirror 127.0.0.1:42609 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:39,296 WARN [Thread-1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34565,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK], DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:39,296 WARN [Thread-1013 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741886_1069 2024-12-09T11:23:39,296 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_800564294_22 at /127.0.0.1:57742 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T11:23:39,296 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_800564294_22 at /127.0.0.1:57742 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:34565:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57742 dst: /127.0.0.1:34565 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:39,296 WARN [Thread-1013 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:39,299 WARN [Thread-1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33721 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:39,299 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_800564294_22 at /127.0.0.1:47012 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6]'}, localName='127.0.0.1:36659', datanodeUuid='be79e6b3-d4cb-48c0-80b4-1f6daeeb8041', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741887_1070 to mirror 127.0.0.1:33721 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:23:39,299 WARN [Thread-1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 2024-12-09T11:23:39,299 WARN [Thread-1013 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741887_1070 2024-12-09T11:23:39,299 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_800564294_22 at /127.0.0.1:47012 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T11:23:39,299 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_800564294_22 at /127.0.0.1:47012 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47012 dst: /127.0.0.1:36659 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:39,303 WARN [Thread-1013 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:39,308 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:39,308 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:39,308 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:39,310 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:39,310 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:39,311 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823/2dff3a36d44f%2C43697%2C1733743388823.1733743389176 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823/2dff3a36d44f%2C43697%2C1733743388823.1733743419290 2024-12-09T11:23:39,313 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:39,313 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:39,313 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823/2dff3a36d44f%2C43697%2C1733743388823.1733743389176 2024-12-09T11:23:39,314 WARN [IPC Server handler 1 on default port 42671 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823/2dff3a36d44f%2C43697%2C1733743388823.1733743389176 has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741830_1006 2024-12-09T11:23:39,314 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40743:40743),(127.0.0.1/127.0.0.1:40309:40309)] 2024-12-09T11:23:39,314 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823/2dff3a36d44f%2C43697%2C1733743388823.1733743389176 is not closed yet, will try archiving it next time 2024-12-09T11:23:39,314 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823/2dff3a36d44f%2C43697%2C1733743388823.1733743389176 after 1ms 2024-12-09T11:23:40,242 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:41,135 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:42,242 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:43,135 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:43,316 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823/2dff3a36d44f%2C43697%2C1733743388823.1733743389176 after 4003ms 2024-12-09T11:23:43,690 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6feed621 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2059541493-172.17.0.3-1733743387425:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:35777,null,null]) java.net.ConnectException: Call From 2dff3a36d44f/172.17.0.3 to localhost:35811 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-09T11:23:43,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741833_1019 (size=455) 2024-12-09T11:23:44,107 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 to hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/oldWALs/2dff3a36d44f%2C42933%2C1733743388938.1733743389516 2024-12-09T11:23:44,108 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743409097 to hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/oldWALs/2dff3a36d44f%2C42933%2C1733743388938.1733743409097 2024-12-09T11:23:44,243 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:44,658 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1687ba41[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34565, datanodeUuid=2246728c-2716-42f5-9852-b7b26aeb1f07, infoPort=40743, infoSecurePort=0, ipcPort=46787, storageInfo=lv=-57;cid=testClusterID;nsid=852884267;c=1733743387425):Failed to transfer BP-2059541493-172.17.0.3-1733743387425:blk_1073741833_1019 to 127.0.0.1:42609 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:45,135 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:46,243 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:46,983 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C42933%2C1733743388938.1733743426982 2024-12-09T11:23:46,985 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:46,986 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741889_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 
2024-12-09T11:23:46,986 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741889_1073 2024-12-09T11:23:46,986 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:46,990 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:46,991 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:46,991 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:46,991 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:46,991 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:46,991 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743411115 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743426982 2024-12-09T11:23:46,992 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40309:40309),(127.0.0.1/127.0.0.1:40743:40743)] 2024-12-09T11:23:46,992 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.1733743411115 is not closed yet, will try archiving it next time 2024-12-09T11:23:46,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741875_1058 (size=12911) 2024-12-09T11:23:46,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42933 {}] regionserver.HRegion(8855): Flush requested on 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:46,998 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 693b5ab06e8c428d0553d9f2b9d9d929 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T11:23:47,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/c2b3ebe955cc41d68db889b4a5c460c1 is 1080, key is row0013/info:/1733743426993/Put/seqid=0 2024-12-09T11:23:47,009 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:57782 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data4]'}, localName='127.0.0.1:34565', datanodeUuid='2246728c-2716-42f5-9852-b7b26aeb1f07', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741891_1075 to mirror 127.0.0.1:33721 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:47,009 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33721 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,010 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:57782 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T11:23:47,010 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34565,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK], DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 2024-12-09T11:23:47,010 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741891_1075 2024-12-09T11:23:47,010 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:57782 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:34565:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57782 dst: /127.0.0.1:34565 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:47,010 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:47,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741892_1076 (size=8190) 2024-12-09T11:23:47,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741892_1076 (size=8190) 2024-12-09T11:23:47,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/c2b3ebe955cc41d68db889b4a5c460c1 2024-12-09T11:23:47,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/c2b3ebe955cc41d68db889b4a5c460c1 as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/c2b3ebe955cc41d68db889b4a5c460c1 2024-12-09T11:23:47,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/c2b3ebe955cc41d68db889b4a5c460c1, entries=3, sequenceid=66, filesize=8.0 K 2024-12-09T11:23:47,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 693b5ab06e8c428d0553d9f2b9d9d929 in 39ms, sequenceid=66, compaction requested=false 2024-12-09T11:23:47,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 693b5ab06e8c428d0553d9f2b9d9d929: 2024-12-09T11:23:47,037 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-12-09T11:23:47,037 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:23:47,038 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/252acc73b6ae4f16bfe5d5817ed72720 because midkey is the same as first or last row 2024-12-09T11:23:47,136 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes 
[DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,136 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-09T11:23:47,221 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T11:23:47,222 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T11:23:47,222 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:23:47,222 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:47,222 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:47,222 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:23:47,222 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T11:23:47,222 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1643275885, stopped=false 2024-12-09T11:23:47,223 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2dff3a36d44f,43697,1733743388823 2024-12-09T11:23:47,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:47,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:47,225 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:23:47,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:47,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42489-0x1012aec38870002, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:47,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42489-0x1012aec38870002, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:47,225 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T11:23:47,225 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:23:47,225 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:47,225 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:47,225 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2dff3a36d44f,42933,1733743388938' ***** 2024-12-09T11:23:47,225 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:23:47,225 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2dff3a36d44f,42489,1733743390184' ***** 2024-12-09T11:23:47,225 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:23:47,226 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:23:47,226 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:23:47,226 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:23:47,226 INFO [RS:1;2dff3a36d44f:42489 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:23:47,226 INFO [RS:0;2dff3a36d44f:42933 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:23:47,226 INFO [RS:1;2dff3a36d44f:42489 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:23:47,226 INFO [RS:0;2dff3a36d44f:42933 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:23:47,226 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.HRegionServer(959): stopping server 2dff3a36d44f,42489,1733743390184 2024-12-09T11:23:47,226 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:23:47,226 INFO [RS:1;2dff3a36d44f:42489 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;2dff3a36d44f:42489. 
2024-12-09T11:23:47,226 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:23:47,227 DEBUG [RS:1;2dff3a36d44f:42489 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:23:47,227 DEBUG [RS:1;2dff3a36d44f:42489 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:47,227 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(3091): Received CLOSE for 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:47,227 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:47,227 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.HRegionServer(976): stopping server 2dff3a36d44f,42489,1733743390184; all regions closed. 2024-12-09T11:23:47,227 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(959): stopping server 2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:47,228 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:23:47,228 INFO [RS:0;2dff3a36d44f:42933 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2dff3a36d44f:42933. 
2024-12-09T11:23:47,228 DEBUG [RS:0;2dff3a36d44f:42933 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:23:47,228 DEBUG [RS:0;2dff3a36d44f:42933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:47,228 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:23:47,228 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,228 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:23:47,228 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,228 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 693b5ab06e8c428d0553d9f2b9d9d929, disabling compactions & flushes 2024-12-09T11:23:47,228 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,228 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 2024-12-09T11:23:47,228 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 2024-12-09T11:23:47,228 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,228 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. after waiting 0 ms 2024-12-09T11:23:47,228 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 
2024-12-09T11:23:47,228 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,229 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 693b5ab06e8c428d0553d9f2b9d9d929 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-09T11:23:47,229 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,229 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,229 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42489-0x1012aec38870002, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:47,229 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 2024-12-09T11:23:47,230 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:47,230 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T11:23:47,230 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T11:23:47,230 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T11:23:47,230 WARN [IPC Server handler 3 on default port 42671 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 has not been closed. Lease recovery is in progress. 
RecoveryId = 1077 for block blk_1073741837_1013 2024-12-09T11:23:47,230 DEBUG [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(1325): Online Regions={693b5ab06e8c428d0553d9f2b9d9d929=TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T11:23:47,230 DEBUG [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 693b5ab06e8c428d0553d9f2b9d9d929 2024-12-09T11:23:47,231 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 after 2ms 2024-12-09T11:23:47,233 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:23:47,233 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:23:47,233 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:23:47,233 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:23:47,233 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:23:47,233 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-09T11:23:47,233 ERROR [FSHLog-0-hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e-prefix:2dff3a36d44f,42933,1733743388938.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,234 WARN [FSHLog-0-hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e-prefix:2dff3a36d44f,42933,1733743388938.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,234 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C42933%2C1733743388938.meta:.meta(num 1733743389925) roll requested 2024-12-09T11:23:47,234 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C42933%2C1733743388938.meta.1733743427234.meta 2024-12-09T11:23:47,235 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/6abe549381a844eb8f3a696a5a240f70 is 1080, key is row0015/info:/1733743426999/Put/seqid=0 2024-12-09T11:23:47,237 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,237 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK], DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:47,237 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741894_1079 2024-12-09T11:23:47,238 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:47,239 INFO [regionserver/2dff3a36d44f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T11:23:47,239 INFO [regionserver/2dff3a36d44f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T11:23:47,239 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,239 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741895_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 2024-12-09T11:23:47,239 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741895_1080 2024-12-09T11:23:47,240 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:47,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741893_1078 (size=14660) 2024-12-09T11:23:47,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741893_1078 (size=14660) 2024-12-09T11:23:47,242 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/6abe549381a844eb8f3a696a5a240f70 2024-12-09T11:23:47,242 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1081 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42609 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:47,242 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:57824 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741896_1081] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data4]'}, localName='127.0.0.1:34565', datanodeUuid='2246728c-2716-42f5-9852-b7b26aeb1f07', xmitsInProgress=0}:Exception transferring block BP-2059541493-172.17.0.3-1733743387425:blk_1073741896_1081 to mirror 127.0.0.1:42609 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:47,242 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741896_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34565,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK], DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:47,242 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:57824 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741896_1081] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T11:23:47,242 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741896_1081 2024-12-09T11:23:47,242 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2011339957_22 at /127.0.0.1:57824 [Receiving block BP-2059541493-172.17.0.3-1733743387425:blk_1073741896_1081] {}] datanode.DataXceiver(331): 127.0.0.1:34565:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57824 dst: /127.0.0.1:34565 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:47,243 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:47,250 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,250 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,251 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,251 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,254 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/.tmp/info/6abe549381a844eb8f3a696a5a240f70 as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/6abe549381a844eb8f3a696a5a240f70 2024-12-09T11:23:47,254 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,254 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743427234.meta 2024-12-09T11:23:47,255 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,255 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,255 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta 2024-12-09T11:23:47,256 WARN [IPC Server handler 0 on default port 42671 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta has not been closed. Lease recovery is in progress. RecoveryId = 1083 for block blk_1073741834_1010 2024-12-09T11:23:47,256 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta after 1ms 2024-12-09T11:23:47,259 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40743:40743),(127.0.0.1/127.0.0.1:40309:40309)] 2024-12-09T11:23:47,259 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta is not closed yet, will try archiving it next time 2024-12-09T11:23:47,261 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/6abe549381a844eb8f3a696a5a240f70, entries=9, sequenceid=78, filesize=14.3 K 2024-12-09T11:23:47,263 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 693b5ab06e8c428d0553d9f2b9d9d929 in 35ms, sequenceid=78, compaction requested=true 2024-12-09T11:23:47,264 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/d350547985404429a362401affd56513, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/30b393f7a82d4c85ad377a4be3a3615b, 
hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/7c4a943e632f4333b7fab3417d58d93c, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/29a2e61394eb4a64bd28a6d259dcae2a, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/449793d6b5cb41808a115a847e2b6ecc, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/ae13a32aadab4c1a89256672565e1a84] to archive 2024-12-09T11:23:47,265 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T11:23:47,268 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/d350547985404429a362401affd56513 to hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/d350547985404429a362401affd56513 2024-12-09T11:23:47,271 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/30b393f7a82d4c85ad377a4be3a3615b to hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/30b393f7a82d4c85ad377a4be3a3615b 2024-12-09T11:23:47,272 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/7c4a943e632f4333b7fab3417d58d93c to hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/7c4a943e632f4333b7fab3417d58d93c 2024-12-09T11:23:47,275 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/29a2e61394eb4a64bd28a6d259dcae2a to 
hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/29a2e61394eb4a64bd28a6d259dcae2a 2024-12-09T11:23:47,276 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/449793d6b5cb41808a115a847e2b6ecc to hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/449793d6b5cb41808a115a847e2b6ecc 2024-12-09T11:23:47,277 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/ae13a32aadab4c1a89256672565e1a84 to hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/info/ae13a32aadab4c1a89256672565e1a84 2024-12-09T11:23:47,277 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=2dff3a36d44f:43697 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-09T11:23:47,278 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d350547985404429a362401affd56513=10347, 30b393f7a82d4c85ad377a4be3a3615b=12506, 7c4a943e632f4333b7fab3417d58d93c=17994, 29a2e61394eb4a64bd28a6d259dcae2a=6027, 449793d6b5cb41808a115a847e2b6ecc=6027, ae13a32aadab4c1a89256672565e1a84=6027] 2024-12-09T11:23:47,282 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/693b5ab06e8c428d0553d9f2b9d9d929/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-12-09T11:23:47,283 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 2024-12-09T11:23:47,283 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 693b5ab06e8c428d0553d9f2b9d9d929: Waiting for close lock at 1733743427228Running coprocessor pre-close hooks at 1733743427228Disabling compacts and flushes for region at 1733743427228Disabling writes for close at 1733743427228Obtaining lock to block concurrent updates at 1733743427229 (+1 ms)Preparing flush snapshotting stores in 693b5ab06e8c428d0553d9f2b9d9d929 at 1733743427229Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1733743427229Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. at 1733743427230 (+1 ms)Flushing 693b5ab06e8c428d0553d9f2b9d9d929/info: creating writer at 1733743427231 (+1 ms)Flushing 693b5ab06e8c428d0553d9f2b9d9d929/info: appending metadata at 1733743427234 (+3 ms)Flushing 693b5ab06e8c428d0553d9f2b9d9d929/info: closing flushed file at 1733743427235 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@274018c0: reopening flushed file at 1733743427253 (+18 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 693b5ab06e8c428d0553d9f2b9d9d929 in 35ms, sequenceid=78, compaction requested=true at 1733743427263 (+10 ms)Writing region close event to WAL at 1733743427278 (+15 ms)Running coprocessor post-close hooks at 1733743427283 (+5 ms)Closed at 1733743427283 2024-12-09T11:23:47,284 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929. 
2024-12-09T11:23:47,285 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/.tmp/info/4e67453c898d4734ae5d377e3031d9c7 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733743390273.693b5ab06e8c428d0553d9f2b9d9d929./info:regioninfo/1733743390638/Put/seqid=0 2024-12-09T11:23:47,287 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,287 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741898_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK], DatanodeInfoWithStorage[127.0.0.1:34565,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:47,287 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741898_1084 2024-12-09T11:23:47,288 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:47,289 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:23:47,289 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:47,289 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741899_1085 2024-12-09T11:23:47,290 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:47,290 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,291 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-6e8d3015-ba57-4021-94e7-1d66a6920dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 
2024-12-09T11:23:47,291 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741900_1086 2024-12-09T11:23:47,291 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:47,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741901_1087 (size=7089) 2024-12-09T11:23:47,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741901_1087 (size=7089) 2024-12-09T11:23:47,297 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/.tmp/info/4e67453c898d4734ae5d377e3031d9c7 2024-12-09T11:23:47,327 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/.tmp/ns/658e0125faeb453e8f5b2064d1bba196 is 43, key is default/ns:d/1733743390020/Put/seqid=0 2024-12-09T11:23:47,329 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,330 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK], DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:47,330 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741902_1088 2024-12-09T11:23:47,330 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:47,331 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,332 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK], DatanodeInfoWithStorage[127.0.0.1:34565,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK]) is bad. 2024-12-09T11:23:47,332 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741903_1089 2024-12-09T11:23:47,332 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42609,DS-594d38d1-bd44-495c-9a01-2d76ccfab7dd,DISK] 2024-12-09T11:23:47,333 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,333 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:34565,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 
2024-12-09T11:23:47,333 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741904_1090 2024-12-09T11:23:47,334 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:47,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741905_1091 (size=5153) 2024-12-09T11:23:47,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741905_1091 (size=5153) 2024-12-09T11:23:47,340 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/.tmp/ns/658e0125faeb453e8f5b2064d1bba196 2024-12-09T11:23:47,362 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/.tmp/table/59d7ffee501e476bbda24a103486dc87 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733743390650/Put/seqid=0 2024-12-09T11:23:47,364 WARN [Thread-1067 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1092 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,364 WARN [Thread-1067 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741906_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK], DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK]) is bad. 2024-12-09T11:23:47,364 WARN [Thread-1067 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741906_1092 2024-12-09T11:23:47,365 WARN [Thread-1067 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35777,DS-e3b5211e-dfeb-4620-9b14-06da576d6b0e,DISK] 2024-12-09T11:23:47,366 WARN [Thread-1067 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1093 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:23:47,366 WARN [Thread-1067 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2059541493-172.17.0.3-1733743387425:blk_1073741907_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK], DatanodeInfoWithStorage[127.0.0.1:34565,DS-185b7880-58c5-4218-ad2f-2db231ca8105,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK]) is bad. 2024-12-09T11:23:47,366 WARN [Thread-1067 {}] hdfs.DataStreamer(1850): Abandoning BP-2059541493-172.17.0.3-1733743387425:blk_1073741907_1093 2024-12-09T11:23:47,367 WARN [Thread-1067 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33721,DS-6383a02a-58c5-49df-89f5-405242871e7f,DISK] 2024-12-09T11:23:47,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741908_1094 (size=5424) 2024-12-09T11:23:47,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741908_1094 (size=5424) 2024-12-09T11:23:47,377 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/.tmp/table/59d7ffee501e476bbda24a103486dc87 2024-12-09T11:23:47,378 INFO [regionserver/2dff3a36d44f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T11:23:47,378 INFO [regionserver/2dff3a36d44f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T11:23:47,382 INFO [regionserver/2dff3a36d44f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:23:47,386 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/.tmp/info/4e67453c898d4734ae5d377e3031d9c7 as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/info/4e67453c898d4734ae5d377e3031d9c7 2024-12-09T11:23:47,392 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/info/4e67453c898d4734ae5d377e3031d9c7, entries=10, sequenceid=11, filesize=6.9 K 2024-12-09T11:23:47,393 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/.tmp/ns/658e0125faeb453e8f5b2064d1bba196 as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/ns/658e0125faeb453e8f5b2064d1bba196 2024-12-09T11:23:47,399 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/ns/658e0125faeb453e8f5b2064d1bba196, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T11:23:47,400 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/.tmp/table/59d7ffee501e476bbda24a103486dc87 as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/table/59d7ffee501e476bbda24a103486dc87 2024-12-09T11:23:47,407 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/table/59d7ffee501e476bbda24a103486dc87, entries=2, sequenceid=11, filesize=5.3 K 2024-12-09T11:23:47,409 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 176ms, sequenceid=11, compaction requested=false 2024-12-09T11:23:47,414 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T11:23:47,415 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:23:47,415 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:23:47,415 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743427233Running coprocessor pre-close hooks at 1733743427233Disabling compacts and flushes for region at 1733743427233Disabling writes for close at 1733743427233Obtaining lock to block concurrent updates at 1733743427233Preparing flush snapshotting stores in 1588230740 at 1733743427233Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733743427234 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733743427266 (+32 ms)Flushing 1588230740/info: creating writer at 1733743427266Flushing 1588230740/info: appending metadata at 1733743427285 (+19 ms)Flushing 1588230740/info: closing flushed file at 1733743427285Flushing 1588230740/ns: creating writer at 1733743427307 (+22 ms)Flushing 1588230740/ns: appending metadata at 1733743427327 (+20 ms)Flushing 
1588230740/ns: closing flushed file at 1733743427327Flushing 1588230740/table: creating writer at 1733743427347 (+20 ms)Flushing 1588230740/table: appending metadata at 1733743427362 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733743427362Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a974d86: reopening flushed file at 1733743427385 (+23 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@940b877: reopening flushed file at 1733743427392 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@777016e7: reopening flushed file at 1733743427399 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 176ms, sequenceid=11, compaction requested=false at 1733743427409 (+10 ms)Writing region close event to WAL at 1733743427410 (+1 ms)Running coprocessor post-close hooks at 1733743427415 (+5 ms)Closed at 1733743427415 2024-12-09T11:23:47,415 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T11:23:47,431 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(976): stopping server 2dff3a36d44f,42933,1733743388938; all regions closed. 2024-12-09T11:23:47,431 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,431 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,432 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,432 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,432 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:47,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741897_1082 (size=825) 2024-12-09T11:23:47,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741897_1082 (size=825) 2024-12-09T11:23:48,242 INFO [regionserver/2dff3a36d44f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:23:50,043 INFO [master/2dff3a36d44f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T11:23:50,043 INFO [master/2dff3a36d44f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-09T11:23:50,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741875_1058 (size=12911) 2024-12-09T11:23:50,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741836_1012 (size=76) 2024-12-09T11:23:50,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:23:51,232 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 after 4003ms 2024-12-09T11:23:51,257 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta after 4002ms 2024-12-09T11:23:51,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:23:52,229 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-09T11:23:52,232 DEBUG [RS:1;2dff3a36d44f:42489 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/oldWALs 2024-12-09T11:23:52,232 INFO [RS:1;2dff3a36d44f:42489 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C42489%2C1733743390184:(num 1733743390374) 2024-12-09T11:23:52,232 DEBUG [RS:1;2dff3a36d44f:42489 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:52,232 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:23:52,232 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:23:52,232 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.ChoreService(370): Chore service for: regionserver/2dff3a36d44f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T11:23:52,232 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:23:52,232 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:23:52,232 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T11:23:52,232 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:23:52,232 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T11:23:52,232 INFO [RS:1;2dff3a36d44f:42489 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42489 2024-12-09T11:23:52,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42489-0x1012aec38870002, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2dff3a36d44f,42489,1733743390184 2024-12-09T11:23:52,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:23:52,235 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:23:52,235 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2dff3a36d44f,42489,1733743390184] 2024-12-09T11:23:52,237 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2dff3a36d44f,42489,1733743390184 already deleted, retry=false 2024-12-09T11:23:52,237 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2dff3a36d44f,42489,1733743390184 expired; onlineServers=1 2024-12-09T11:23:52,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:23:52,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,319 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42489-0x1012aec38870002, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:52,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42489-0x1012aec38870002, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:52,336 INFO [RS:1;2dff3a36d44f:42489 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:23:52,337 INFO [RS:1;2dff3a36d44f:42489 {}] regionserver.HRegionServer(1031): Exiting; stopping=2dff3a36d44f,42489,1733743390184; zookeeper connection closed. 2024-12-09T11:23:52,337 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@361cd12e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@361cd12e 2024-12-09T11:23:52,433 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-09T11:23:52,437 DEBUG [RS:0;2dff3a36d44f:42933 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/oldWALs 2024-12-09T11:23:52,437 INFO [RS:0;2dff3a36d44f:42933 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C42933%2C1733743388938.meta:.meta(num 1733743427234) 2024-12-09T11:23:52,437 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:52,437 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:52,437 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:52,438 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:52,438 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:52,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741890_1074 (size=14682) 2024-12-09T11:23:52,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741890_1074 (size=14682) 2024-12-09T11:23:52,443 DEBUG [RS:0;2dff3a36d44f:42933 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/oldWALs 2024-12-09T11:23:52,443 INFO [RS:0;2dff3a36d44f:42933 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C42933%2C1733743388938:(num 1733743426982) 2024-12-09T11:23:52,443 DEBUG [RS:0;2dff3a36d44f:42933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:52,443 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:23:52,443 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:23:52,443 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.ChoreService(370): Chore service for: regionserver/2dff3a36d44f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T11:23:52,443 INFO [RS:0;2dff3a36d44f:42933 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:23:52,443 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T11:23:52,443 INFO [RS:0;2dff3a36d44f:42933 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42933 2024-12-09T11:23:52,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2dff3a36d44f,42933,1733743388938 2024-12-09T11:23:52,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:23:52,446 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:23:52,447 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2dff3a36d44f,42933,1733743388938] 2024-12-09T11:23:52,449 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2dff3a36d44f,42933,1733743388938 already deleted, retry=false 2024-12-09T11:23:52,449 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2dff3a36d44f,42933,1733743388938 expired; onlineServers=0 2024-12-09T11:23:52,449 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2dff3a36d44f,43697,1733743388823' ***** 2024-12-09T11:23:52,449 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T11:23:52,449 INFO [M:0;2dff3a36d44f:43697 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:23:52,449 INFO [M:0;2dff3a36d44f:43697 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:23:52,449 DEBUG [M:0;2dff3a36d44f:43697 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T11:23:52,449 DEBUG [M:0;2dff3a36d44f:43697 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T11:23:52,449 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
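[editor's annotation, not part of the captured log] The ERROR at 11:23:52,433 above reports that the async WAL writer did not close within 5 seconds and names the config key "hbase.wal.fshlog.wait.on.shutdown.seconds". As a minimal sketch only (the key is copied verbatim from that message; the value 30 and the class name are arbitrary examples, not anything used by this test), the wait can be raised programmatically before a cluster is started:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Give the async writer close more time than the 5s seen in this log.
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
        // Prints the effective value; 5 is only the fallback used here if unset.
        System.out.println(conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 5));
      }
    }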
2024-12-09T11:23:52,449 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743389294 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743389294,5,FailOnTimeoutGroup] 2024-12-09T11:23:52,449 INFO [M:0;2dff3a36d44f:43697 {}] hbase.ChoreService(370): Chore service for: master/2dff3a36d44f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T11:23:52,449 INFO [M:0;2dff3a36d44f:43697 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:23:52,449 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743389294 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743389294,5,FailOnTimeoutGroup] 2024-12-09T11:23:52,449 DEBUG [M:0;2dff3a36d44f:43697 {}] master.HMaster(1795): Stopping service threads 2024-12-09T11:23:52,449 INFO [M:0;2dff3a36d44f:43697 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T11:23:52,450 INFO [M:0;2dff3a36d44f:43697 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:23:52,450 INFO [M:0;2dff3a36d44f:43697 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T11:23:52,450 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T11:23:52,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T11:23:52,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:52,451 DEBUG [M:0;2dff3a36d44f:43697 {}] zookeeper.ZKUtil(347): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T11:23:52,451 WARN [M:0;2dff3a36d44f:43697 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T11:23:52,452 INFO [M:0;2dff3a36d44f:43697 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/.lastflushedseqids 2024-12-09T11:23:52,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741909_1095 (size=130) 2024-12-09T11:23:52,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741909_1095 (size=130) 2024-12-09T11:23:52,460 INFO [M:0;2dff3a36d44f:43697 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T11:23:52,460 INFO [M:0;2dff3a36d44f:43697 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T11:23:52,461 DEBUG [M:0;2dff3a36d44f:43697 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:23:52,461 INFO [M:0;2dff3a36d44f:43697 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:52,461 DEBUG [M:0;2dff3a36d44f:43697 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:52,461 DEBUG [M:0;2dff3a36d44f:43697 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:23:52,461 DEBUG [M:0;2dff3a36d44f:43697 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:52,461 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-12-09T11:23:52,477 DEBUG [M:0;2dff3a36d44f:43697 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7129921d7da44e04879312200e644991 is 82, key is hbase:meta,,1/info:regioninfo/1733743389996/Put/seqid=0 2024-12-09T11:23:52,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741910_1096 (size=5672) 2024-12-09T11:23:52,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741910_1096 (size=5672) 2024-12-09T11:23:52,483 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7129921d7da44e04879312200e644991 2024-12-09T11:23:52,503 DEBUG [M:0;2dff3a36d44f:43697 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d5cab4ef592c40d89f3a6d034093ff6a is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733743390655/Put/seqid=0 2024-12-09T11:23:52,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741911_1097 (size=6255) 2024-12-09T11:23:52,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741911_1097 (size=6255) 2024-12-09T11:23:52,509 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d5cab4ef592c40d89f3a6d034093ff6a 2024-12-09T11:23:52,514 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d5cab4ef592c40d89f3a6d034093ff6a 2024-12-09T11:23:52,529 DEBUG [M:0;2dff3a36d44f:43697 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c2e61a19a6054b729554f2f5aafa32d4 is 69, key is 2dff3a36d44f,42489,1733743390184/rs:state/1733743390225/Put/seqid=0 2024-12-09T11:23:52,534 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741912_1098 (size=5224) 2024-12-09T11:23:52,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741912_1098 (size=5224) 2024-12-09T11:23:52,535 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c2e61a19a6054b729554f2f5aafa32d4 2024-12-09T11:23:52,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:52,548 INFO [RS:0;2dff3a36d44f:42933 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:23:52,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42933-0x1012aec38870001, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:52,548 INFO [RS:0;2dff3a36d44f:42933 {}] regionserver.HRegionServer(1031): Exiting; stopping=2dff3a36d44f,42933,1733743388938; zookeeper connection closed. 2024-12-09T11:23:52,548 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7b41d7b5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7b41d7b5 2024-12-09T11:23:52,548 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-09T11:23:52,556 DEBUG [M:0;2dff3a36d44f:43697 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8d22e47e7aca4b13b35dc2428cfebb6c is 52, key is load_balancer_on/state:d/1733743390134/Put/seqid=0 2024-12-09T11:23:52,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741913_1099 (size=5056) 2024-12-09T11:23:52,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741913_1099 (size=5056) 2024-12-09T11:23:52,561 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8d22e47e7aca4b13b35dc2428cfebb6c 2024-12-09T11:23:52,568 DEBUG [M:0;2dff3a36d44f:43697 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7129921d7da44e04879312200e644991 as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7129921d7da44e04879312200e644991 2024-12-09T11:23:52,573 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7129921d7da44e04879312200e644991, entries=8, sequenceid=60, filesize=5.5 K 2024-12-09T11:23:52,574 DEBUG [M:0;2dff3a36d44f:43697 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d5cab4ef592c40d89f3a6d034093ff6a as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d5cab4ef592c40d89f3a6d034093ff6a 2024-12-09T11:23:52,580 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d5cab4ef592c40d89f3a6d034093ff6a 2024-12-09T11:23:52,580 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d5cab4ef592c40d89f3a6d034093ff6a, entries=6, sequenceid=60, filesize=6.1 K 2024-12-09T11:23:52,581 DEBUG [M:0;2dff3a36d44f:43697 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c2e61a19a6054b729554f2f5aafa32d4 as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c2e61a19a6054b729554f2f5aafa32d4 2024-12-09T11:23:52,585 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c2e61a19a6054b729554f2f5aafa32d4, entries=2, sequenceid=60, filesize=5.1 K 2024-12-09T11:23:52,586 DEBUG [M:0;2dff3a36d44f:43697 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8d22e47e7aca4b13b35dc2428cfebb6c as hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8d22e47e7aca4b13b35dc2428cfebb6c 2024-12-09T11:23:52,591 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8d22e47e7aca4b13b35dc2428cfebb6c, entries=1, sequenceid=60, filesize=4.9 K 2024-12-09T11:23:52,592 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=60, compaction requested=false 2024-12-09T11:23:52,593 INFO [M:0;2dff3a36d44f:43697 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T11:23:52,594 DEBUG [M:0;2dff3a36d44f:43697 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743432461Disabling compacts and flushes for region at 1733743432461Disabling writes for close at 1733743432461Obtaining lock to block concurrent updates at 1733743432461Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733743432461Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1733743432461Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733743432462 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733743432462Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733743432477 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733743432477Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733743432488 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733743432503 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733743432503Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733743432515 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733743432529 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733743432529Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733743432539 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733743432555 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733743432555Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@268ab1ee: reopening flushed file at 1733743432567 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ac8b669: reopening flushed file at 1733743432574 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e5edabc: reopening flushed file at 1733743432580 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f22949d: reopening flushed file at 1733743432586 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=60, compaction requested=false at 1733743432592 (+6 ms)Writing region close event to WAL at 1733743432593 (+1 ms)Closed at 1733743432593 2024-12-09T11:23:52,594 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:52,594 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:52,594 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:52,594 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:52,595 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:23:52,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741888_1071 (size=1045) 2024-12-09T11:23:52,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34565 is added to blk_1073741888_1071 (size=1045) 2024-12-09T11:23:52,821 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T11:23:52,839 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:52,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:23:53,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:23:53,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:23:53,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:23:53,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741835_1011 (size=393) 2024-12-09T11:23:53,694 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6e480ac2 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2059541493-172.17.0.3-1733743387425:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:35777,null,null]) java.net.ConnectException: Call From 2dff3a36d44f/172.17.0.3 to localhost:35811 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-09T11:23:54,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:23:54,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:23:54,331 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/WALs/2dff3a36d44f,43697,1733743388823/2dff3a36d44f%2C43697%2C1733743388823.1733743389176 to hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/oldWALs/2dff3a36d44f%2C43697%2C1733743388823.1733743389176 2024-12-09T11:23:54,337 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/MasterData/oldWALs/2dff3a36d44f%2C43697%2C1733743388823.1733743389176 to hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/oldWALs/2dff3a36d44f%2C43697%2C1733743388823.1733743389176$masterlocalwal$ 2024-12-09T11:23:54,337 INFO [M:0;2dff3a36d44f:43697 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T11:23:54,337 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
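[editor's annotation, not part of the captured log] The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from RecoverLeaseFSUtils polling DistributedFileSystem.isFileClosed reflectively after the DFS client has already been shut down. As a rough illustration only (WalCloseProbe and isWalClosed are hypothetical names, not HBase's own helper), the same NameNode check made directly looks like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalCloseProbe {
      // Hypothetical helper: asks the NameNode whether the WAL file is closed,
      // i.e. whether lease recovery has completed. If the underlying DFS client
      // has been shut down, this throws "Filesystem closed", matching the log.
      static boolean isWalClosed(Configuration conf, String walUri) throws Exception {
        Path wal = new Path(walUri);
        FileSystem fs = wal.getFileSystem(conf);
        if (fs instanceof DistributedFileSystem) {
          return ((DistributedFileSystem) fs).isFileClosed(wal);
        }
        return true; // non-HDFS filesystems have no lease to recover
      }
    }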
2024-12-09T11:23:54,337 INFO [M:0;2dff3a36d44f:43697 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43697 2024-12-09T11:23:54,337 INFO [M:0;2dff3a36d44f:43697 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:23:54,455 INFO [M:0;2dff3a36d44f:43697 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:23:54,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:54,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43697-0x1012aec38870000, quorum=127.0.0.1:60908, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:23:54,460 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@73c11417{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:54,460 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68fc6dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:54,460 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:54,461 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6409ef82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:54,461 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5dd34496{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:54,465 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@fc1296e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2059541493-172.17.0.3-1733743387425:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:35777,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:35811 , LocalHost:localPort 2dff3a36d44f/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-09T11:23:54,466 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
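[editor's annotation, not part of the captured log] The InterruptedIOException above prints the IPC client's connection retry policy, "RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS)". As an illustration only (the class name below is an arbitrary example), an equivalent policy object can be built with Hadoop's RetryPolicies factory, with the values copied from that trace:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public class ConnectRetryPolicyExample {
      public static void main(String[] args) {
        // 10 retries with a fixed 1000 ms sleep, matching the policy in the trace above.
        RetryPolicy policy =
            RetryPolicies.retryUpToMaximumCountWithFixedSleep(10, 1000, TimeUnit.MILLISECONDS);
        System.out.println(policy);
      }
    }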
2024-12-09T11:23:54,466 WARN [BP-2059541493-172.17.0.3-1733743387425 heartbeating to localhost/127.0.0.1:42671 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:23:54,466 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:23:54,466 WARN [BP-2059541493-172.17.0.3-1733743387425 heartbeating to localhost/127.0.0.1:42671 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2059541493-172.17.0.3-1733743387425 (Datanode Uuid 2246728c-2716-42f5-9852-b7b26aeb1f07) service to localhost/127.0.0.1:42671 2024-12-09T11:23:54,466 ERROR [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@fc1296e {}] datanode.DataNode(1743): Cannot find BPOfferService for reporting block received for bpid=BP-2059541493-172.17.0.3-1733743387425 2024-12-09T11:23:54,467 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data3/current/BP-2059541493-172.17.0.3-1733743387425 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:54,467 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data4/current/BP-2059541493-172.17.0.3-1733743387425 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:54,467 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@fc1296e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2059541493-172.17.0.3-1733743387425:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:34565,null,null]) java.io.IOException: No block pool offer service for bpid=BP-2059541493-172.17.0.3-1733743387425 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:54,468 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@fc1296e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2059541493-172.17.0.3-1733743387425:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:35777,null,null]) java.io.IOException: No block pool offer service for bpid=BP-2059541493-172.17.0.3-1733743387425 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:23:54,468 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:23:54,468 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@fc1296e {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-2059541493-172.17.0.3-1733743387425:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:34565,null,null], DatanodeInfoWithStorage[127.0.0.1:35777,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-2059541493-172.17.0.3-1733743387425:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:34565,null,null], DatanodeInfoWithStorage[127.0.0.1:35777,null,null]] 2024-12-09T11:23:54,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@19df8718{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:54,491 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@42c8e27e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:54,491 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:54,491 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a7d29fa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:54,491 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bf7e565{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:54,497 WARN [BP-2059541493-172.17.0.3-1733743387425 heartbeating to localhost/127.0.0.1:42671 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:23:54,497 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:23:54,497 WARN [BP-2059541493-172.17.0.3-1733743387425 heartbeating to localhost/127.0.0.1:42671 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2059541493-172.17.0.3-1733743387425 (Datanode Uuid be79e6b3-d4cb-48c0-80b4-1f6daeeb8041) service to localhost/127.0.0.1:42671 2024-12-09T11:23:54,498 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:23:54,498 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data5/current/BP-2059541493-172.17.0.3-1733743387425 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:54,499 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/cluster_1c9cb852-6fc7-714c-c5cb-4148824417e5/data/data6/current/BP-2059541493-172.17.0.3-1733743387425 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:23:54,499 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:23:54,508 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c7e90d4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:23:54,509 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e510a6a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:23:54,509 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:23:54,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35be1ff6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:23:54,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@576276fa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir/,STOPPED} 2024-12-09T11:23:54,522 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T11:23:54,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T11:23:54,609 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:42671 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:39687 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42671 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:42671 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:42671 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$896/0x00007fc184bf7628.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42671 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42671 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42671 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:42671 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) 
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$896/0x00007fc184bf7628.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42671 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39687 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:42671 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:42671 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=448 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=417 (was 486), ProcessCount=11 (was 11), AvailableMemoryMB=1324 (was 2014) 2024-12-09T11:23:54,617 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=417, ProcessCount=11, AvailableMemoryMB=1324 2024-12-09T11:23:54,617 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T11:23:54,617 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.log.dir so I do NOT create it in target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274 2024-12-09T11:23:54,617 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c1f241c4-82b9-bf6b-8014-645fe2cf69dd/hadoop.tmp.dir so I do NOT create it in target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274 2024-12-09T11:23:54,617 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de, deleteOnExit=true 2024-12-09T11:23:54,617 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T11:23:54,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/test.cache.data in system properties and HBase conf 2024-12-09T11:23:54,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T11:23:54,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir in system properties and HBase conf 
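[Editor's note - illustrative sketch, not part of the log.] The ResourceChecker lines above report thread and file-descriptor counts before and after each test ("Thread=154 (was 78)") and list each surviving thread as "Potentially hanging thread" with its stack. A minimal, hypothetical Java sketch of that kind of before/after accounting is shown below; it uses only JDK APIs and is an assumption about the general technique, not HBase's actual ResourceChecker implementation.

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustrative only: approximates the before/after thread accounting suggested
// by the ResourceChecker output above; class and method names are made up.
public class ThreadLeakCheck {

    // Snapshot the names of all currently live threads.
    static Set<String> liveThreadNames() {
        Set<String> names = new HashSet<>();
        for (Thread t : Thread.getAllStackTraces().keySet()) {
            names.add(t.getName());
        }
        return names;
    }

    public static void main(String[] args) {
        Set<String> before = liveThreadNames();

        // ... test body would run here ...

        Set<String> after = liveThreadNames();
        Set<String> leaked = new HashSet<>(after);
        leaked.removeAll(before);

        System.out.printf("Thread=%d (was %d)%n", after.size(), before.size());
        for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
            if (leaked.contains(e.getKey().getName())) {
                System.out.println("Potentially hanging thread: " + e.getKey().getName());
                for (StackTraceElement frame : e.getValue()) {
                    System.out.println("    " + frame);
                }
            }
        }
    }
}

[End of editor's note.]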
2024-12-09T11:23:54,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T11:23:54,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T11:23:54,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T11:23:54,618 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T11:23:54,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:23:54,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:23:54,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T11:23:54,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:23:54,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T11:23:54,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T11:23:54,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:23:54,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/dfs.journalnode.edits.dir in system 
properties and HBase conf 2024-12-09T11:23:54,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T11:23:54,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/nfs.dump.dir in system properties and HBase conf 2024-12-09T11:23:54,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/java.io.tmpdir in system properties and HBase conf 2024-12-09T11:23:54,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:23:54,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T11:23:54,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T11:23:54,638 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:23:54,740 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:54,748 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:54,759 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:54,759 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:54,759 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:23:54,763 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:54,767 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@445db56c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:54,767 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a073962{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:54,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@531c70ff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/java.io.tmpdir/jetty-localhost-46403-hadoop-hdfs-3_4_1-tests_jar-_-any-1393695894713048191/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:23:54,917 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e129549{HTTP/1.1, (http/1.1)}{localhost:46403} 2024-12-09T11:23:54,918 INFO [Time-limited test {}] server.Server(415): Started @156191ms 2024-12-09T11:23:54,935 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:23:55,016 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:55,019 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:55,022 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:55,023 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:55,023 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:23:55,023 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ad986b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:55,024 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8a2f233{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:55,141 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f7e1003{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/java.io.tmpdir/jetty-localhost-44071-hadoop-hdfs-3_4_1-tests_jar-_-any-5327603515545420323/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:55,141 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41d89f44{HTTP/1.1, (http/1.1)}{localhost:44071} 2024-12-09T11:23:55,142 INFO [Time-limited test {}] server.Server(415): Started @156415ms 2024-12-09T11:23:55,144 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:23:55,175 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:23:55,178 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:23:55,179 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:23:55,179 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:23:55,179 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:23:55,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@683c9ef0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:23:55,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b80746b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:23:55,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:23:55,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:23:55,260 WARN [Thread-1190 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data1/current/BP-503541886-172.17.0.3-1733743434659/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:55,260 WARN [Thread-1191 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data2/current/BP-503541886-172.17.0.3-1733743434659/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:55,284 WARN [Thread-1169 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:23:55,287 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3078b2b078faed90 with lease ID 0xad94f03b2dac8764: Processing first storage report for DS-65b26615-0165-4436-8104-1fa8ecfc7d52 from datanode DatanodeRegistration(127.0.0.1:44301, datanodeUuid=6642d444-cecf-474d-9026-d334659a9cd7, infoPort=42817, infoSecurePort=0, ipcPort=35475, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659) 2024-12-09T11:23:55,287 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3078b2b078faed90 with lease ID 0xad94f03b2dac8764: from storage DS-65b26615-0165-4436-8104-1fa8ecfc7d52 node DatanodeRegistration(127.0.0.1:44301, datanodeUuid=6642d444-cecf-474d-9026-d334659a9cd7, infoPort=42817, infoSecurePort=0, ipcPort=35475, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:55,287 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3078b2b078faed90 with lease ID 0xad94f03b2dac8764: Processing first storage report for DS-d2b16396-1fe5-45af-8ae0-976a250c901c from datanode DatanodeRegistration(127.0.0.1:44301, datanodeUuid=6642d444-cecf-474d-9026-d334659a9cd7, infoPort=42817, infoSecurePort=0, ipcPort=35475, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659) 2024-12-09T11:23:55,287 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3078b2b078faed90 with lease ID 0xad94f03b2dac8764: from storage DS-d2b16396-1fe5-45af-8ae0-976a250c901c node DatanodeRegistration(127.0.0.1:44301, datanodeUuid=6642d444-cecf-474d-9026-d334659a9cd7, infoPort=42817, infoSecurePort=0, ipcPort=35475, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:55,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2080f13e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/java.io.tmpdir/jetty-localhost-40415-hadoop-hdfs-3_4_1-tests_jar-_-any-15110642800663091915/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:23:55,305 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b1ed5b{HTTP/1.1, (http/1.1)}{localhost:40415} 2024-12-09T11:23:55,305 INFO [Time-limited test {}] server.Server(415): Started @156578ms 2024-12-09T11:23:55,306 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
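[Editor's note - illustrative sketch, not part of the log.] The "Failed invocation" warnings above show RecoverLeaseFSUtils calling isFileClosed reflectively, so the real failure (IOException: Filesystem closed) surfaces wrapped in a java.lang.reflect.InvocationTargetException, exactly as in those stack traces. The short, self-contained Java sketch below (hypothetical class names, standard JDK reflection only) shows why a checked exception thrown by the target method arrives wrapped, and how the original cause is recovered.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Illustrative only: demonstrates the exception wrapping seen in the
// RecoverLeaseFSUtils stack traces above; FakeClient is a stand-in.
public class ReflectiveInvocationDemo {

    // Stand-in for a filesystem client whose method may throw IOException.
    public static class FakeClient {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        FakeClient client = new FakeClient();
        Method m = FakeClient.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(client, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // The reflective layer reports InvocationTargetException; the
            // underlying failure is carried as its cause.
            System.out.println("wrapper: " + e);
            System.out.println("cause:   " + e.getCause());
        }
    }
}

[End of editor's note.]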
2024-12-09T11:23:55,413 WARN [Thread-1217 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data4/current/BP-503541886-172.17.0.3-1733743434659/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:55,413 WARN [Thread-1216 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data3/current/BP-503541886-172.17.0.3-1733743434659/current, will proceed with Du for space computation calculation, 2024-12-09T11:23:55,433 WARN [Thread-1205 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T11:23:55,436 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ca5af3681d688c5 with lease ID 0xad94f03b2dac8765: Processing first storage report for DS-7e2caeb4-bdb9-4978-8354-5317be0306ea from datanode DatanodeRegistration(127.0.0.1:40449, datanodeUuid=70ace615-e940-44cc-b038-32c5947ae7b5, infoPort=34469, infoSecurePort=0, ipcPort=39837, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659) 2024-12-09T11:23:55,436 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ca5af3681d688c5 with lease ID 0xad94f03b2dac8765: from storage DS-7e2caeb4-bdb9-4978-8354-5317be0306ea node DatanodeRegistration(127.0.0.1:40449, datanodeUuid=70ace615-e940-44cc-b038-32c5947ae7b5, infoPort=34469, infoSecurePort=0, ipcPort=39837, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:55,436 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ca5af3681d688c5 with lease ID 0xad94f03b2dac8765: Processing first storage report for DS-a0113b89-c40b-46d7-b92d-31369bd2748e from datanode DatanodeRegistration(127.0.0.1:40449, datanodeUuid=70ace615-e940-44cc-b038-32c5947ae7b5, infoPort=34469, infoSecurePort=0, ipcPort=39837, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659) 2024-12-09T11:23:55,436 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ca5af3681d688c5 with lease ID 0xad94f03b2dac8765: from storage DS-a0113b89-c40b-46d7-b92d-31369bd2748e node DatanodeRegistration(127.0.0.1:40449, datanodeUuid=70ace615-e940-44cc-b038-32c5947ae7b5, infoPort=34469, infoSecurePort=0, ipcPort=39837, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:23:55,531 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274 2024-12-09T11:23:55,534 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/zookeeper_0, clientPort=53398, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T11:23:55,535 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53398 2024-12-09T11:23:55,535 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:55,536 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:55,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44301 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:23:55,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:23:55,549 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd with version=8 2024-12-09T11:23:55,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/hbase-staging 2024-12-09T11:23:55,552 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:23:55,552 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:55,552 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:55,552 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:23:55,552 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:55,552 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:23:55,552 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T11:23:55,552 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:23:55,553 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38387 2024-12-09T11:23:55,554 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38387 connecting to ZooKeeper ensemble=127.0.0.1:53398 2024-12-09T11:23:55,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:383870x0, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:23:55,565 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38387-0x1012aecef2d0000 connected 2024-12-09T11:23:55,594 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:55,596 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:55,602 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:55,602 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd, hbase.cluster.distributed=false 2024-12-09T11:23:55,604 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:23:55,605 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38387 2024-12-09T11:23:55,605 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38387 2024-12-09T11:23:55,606 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38387 2024-12-09T11:23:55,607 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38387 2024-12-09T11:23:55,607 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38387 2024-12-09T11:23:55,628 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:23:55,628 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:55,628 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:55,628 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:23:55,628 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:23:55,628 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:23:55,628 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T11:23:55,628 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:23:55,630 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34839 2024-12-09T11:23:55,632 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34839 connecting to ZooKeeper ensemble=127.0.0.1:53398 2024-12-09T11:23:55,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:55,635 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:55,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348390x0, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:23:55,651 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:348390x0, quorum=127.0.0.1:53398, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:23:55,652 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T11:23:55,655 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34839-0x1012aecef2d0001 connected 2024-12-09T11:23:55,659 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T11:23:55,660 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T11:23:55,662 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:23:55,670 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34839 2024-12-09T11:23:55,671 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34839 2024-12-09T11:23:55,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34839 2024-12-09T11:23:55,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34839 2024-12-09T11:23:55,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34839 2024-12-09T11:23:55,687 
DEBUG [M:0;2dff3a36d44f:38387 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2dff3a36d44f:38387 2024-12-09T11:23:55,687 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2dff3a36d44f,38387,1733743435551 2024-12-09T11:23:55,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:23:55,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:23:55,696 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2dff3a36d44f,38387,1733743435551 2024-12-09T11:23:55,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T11:23:55,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:55,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:55,698 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T11:23:55,699 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2dff3a36d44f,38387,1733743435551 from backup master directory 2024-12-09T11:23:55,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2dff3a36d44f,38387,1733743435551 2024-12-09T11:23:55,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:23:55,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:23:55,700 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T11:23:55,700 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2dff3a36d44f,38387,1733743435551 2024-12-09T11:23:55,705 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/hbase.id] with ID: 8d90c011-af45-4960-989b-0eb8d65a1344 2024-12-09T11:23:55,705 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/.tmp/hbase.id 2024-12-09T11:23:55,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44301 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:23:55,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:23:55,712 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/.tmp/hbase.id]:[hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/hbase.id] 2024-12-09T11:23:55,726 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:55,726 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T11:23:55,727 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-09T11:23:55,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:55,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:55,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44301 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:23:55,738 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:23:55,739 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T11:23:55,739 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:23:55,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:23:55,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:23:55,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44301 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:23:55,748 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store 2024-12-09T11:23:55,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44301 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:23:55,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:23:55,755 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:55,755 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:23:55,755 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:55,755 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:55,755 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:23:55,755 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:23:55,755 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T11:23:55,755 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743435755Disabling compacts and flushes for region at 1733743435755Disabling writes for close at 1733743435755Writing region close event to WAL at 1733743435755Closed at 1733743435755 2024-12-09T11:23:55,756 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/.initializing 2024-12-09T11:23:55,756 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/WALs/2dff3a36d44f,38387,1733743435551 2024-12-09T11:23:55,759 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C38387%2C1733743435551, suffix=, logDir=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/WALs/2dff3a36d44f,38387,1733743435551, archiveDir=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/oldWALs, maxLogs=10 2024-12-09T11:23:55,759 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C38387%2C1733743435551.1733743435759 2024-12-09T11:23:55,764 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/WALs/2dff3a36d44f,38387,1733743435551/2dff3a36d44f%2C38387%2C1733743435551.1733743435759 2024-12-09T11:23:55,765 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42817:42817),(127.0.0.1/127.0.0.1:34469:34469)] 2024-12-09T11:23:55,765 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:23:55,766 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:55,766 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:55,766 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:55,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:55,769 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T11:23:55,769 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:55,769 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:55,769 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:55,770 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T11:23:55,771 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:55,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:23:55,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:55,772 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T11:23:55,772 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:55,773 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:23:55,773 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:55,774 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T11:23:55,774 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:55,774 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:23:55,775 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:55,775 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:55,775 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:55,777 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:55,777 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:55,778 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T11:23:55,779 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:23:55,782 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:23:55,782 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856865, jitterRate=0.0895611047744751}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T11:23:55,783 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733743435766Initializing all the Stores at 1733743435767 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743435767Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743435767Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743435767Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743435767Cleaning up temporary data from old regions at 1733743435777 (+10 ms)Region opened successfully at 1733743435783 (+6 ms) 2024-12-09T11:23:55,783 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T11:23:55,786 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b4fb87b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:23:55,787 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T11:23:55,787 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T11:23:55,787 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T11:23:55,787 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T11:23:55,788 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T11:23:55,788 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T11:23:55,788 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T11:23:55,790 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T11:23:55,791 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T11:23:55,793 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T11:23:55,794 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T11:23:55,794 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T11:23:55,796 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T11:23:55,796 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T11:23:55,797 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T11:23:55,798 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T11:23:55,799 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T11:23:55,801 DEBUG 
[master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T11:23:55,803 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T11:23:55,810 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T11:23:55,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:55,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:23:55,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:55,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:55,812 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2dff3a36d44f,38387,1733743435551, sessionid=0x1012aecef2d0000, setting cluster-up flag (Was=false) 2024-12-09T11:23:55,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:55,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:55,820 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T11:23:55,821 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,38387,1733743435551 2024-12-09T11:23:55,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:55,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:55,832 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T11:23:55,833 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,38387,1733743435551 2024-12-09T11:23:55,834 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T11:23:55,836 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T11:23:55,837 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T11:23:55,837 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T11:23:55,837 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2dff3a36d44f,38387,1733743435551 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T11:23:55,838 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:23:55,838 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:23:55,839 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:23:55,839 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:23:55,839 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2dff3a36d44f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T11:23:55,839 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:55,839 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:23:55,839 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T11:23:55,840 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733743465840 2024-12-09T11:23:55,840 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T11:23:55,840 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T11:23:55,840 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T11:23:55,841 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T11:23:55,841 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T11:23:55,841 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T11:23:55,841 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:55,841 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T11:23:55,841 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:23:55,841 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T11:23:55,841 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T11:23:55,842 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T11:23:55,842 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T11:23:55,842 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T11:23:55,843 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:55,843 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T11:23:55,846 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743435842,5,FailOnTimeoutGroup] 2024-12-09T11:23:55,850 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743435846,5,FailOnTimeoutGroup] 2024-12-09T11:23:55,850 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:55,850 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T11:23:55,850 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:55,850 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T11:23:55,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:23:55,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44301 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:23:55,889 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(746): ClusterId : 8d90c011-af45-4960-989b-0eb8d65a1344 2024-12-09T11:23:55,889 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T11:23:55,895 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T11:23:55,895 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T11:23:55,897 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T11:23:55,898 DEBUG [RS:0;2dff3a36d44f:34839 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3615fd5e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:23:55,924 DEBUG [RS:0;2dff3a36d44f:34839 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2dff3a36d44f:34839 2024-12-09T11:23:55,924 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T11:23:55,924 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T11:23:55,924 DEBUG [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T11:23:55,925 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,38387,1733743435551 with port=34839, startcode=1733743435627 2024-12-09T11:23:55,926 DEBUG [RS:0;2dff3a36d44f:34839 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T11:23:55,929 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56333, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T11:23:55,929 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38387 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2dff3a36d44f,34839,1733743435627 2024-12-09T11:23:55,930 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38387 {}] master.ServerManager(517): Registering regionserver=2dff3a36d44f,34839,1733743435627 2024-12-09T11:23:55,932 DEBUG [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd 2024-12-09T11:23:55,932 DEBUG [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34165 2024-12-09T11:23:55,932 DEBUG [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T11:23:55,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:23:55,935 DEBUG [RS:0;2dff3a36d44f:34839 {}] zookeeper.ZKUtil(111): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2dff3a36d44f,34839,1733743435627 2024-12-09T11:23:55,935 WARN [RS:0;2dff3a36d44f:34839 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T11:23:55,935 INFO [RS:0;2dff3a36d44f:34839 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:23:55,935 DEBUG [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627 2024-12-09T11:23:55,939 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2dff3a36d44f,34839,1733743435627] 2024-12-09T11:23:55,949 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T11:23:55,956 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T11:23:55,957 INFO [RS:0;2dff3a36d44f:34839 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:23:55,957 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T11:23:55,958 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T11:23:55,959 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T11:23:55,959 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:55,959 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:55,959 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:55,960 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:55,960 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:55,960 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:55,960 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:23:55,960 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:55,960 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:55,960 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:55,960 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:55,960 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:55,960 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:23:55,960 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:23:55,960 DEBUG [RS:0;2dff3a36d44f:34839 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:23:55,964 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
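Side note (not part of the captured log): the ChoreService entries above schedule periodic maintenance tasks (CompactionChecker, CompactedHFilesCleaner, etc.) at fixed periods. The sketch below is a plain-JDK analog of that pattern, not the HBase ChoreService/ScheduledChore API itself; the 1000 ms period is taken from the CompactionChecker entry.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalog {
        public static void main(String[] args) {
            ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
            // Periodic task, analogous to ScheduledChore name=CompactionChecker, period=1000 ms.
            chores.scheduleAtFixedRate(
                () -> System.out.println("compaction check tick"),
                0, 1000, TimeUnit.MILLISECONDS);
            // In HBase the chore pool runs for the life of the server; here we
            // let it tick for a few seconds and then shut the pool down.
            chores.schedule(chores::shutdown, 5, TimeUnit.SECONDS);
        }
    }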
2024-12-09T11:23:55,964 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:55,964 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:55,964 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:55,964 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:55,964 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,34839,1733743435627-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:23:55,981 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T11:23:55,981 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,34839,1733743435627-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:55,981 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:55,981 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.Replication(171): 2dff3a36d44f,34839,1733743435627 started 2024-12-09T11:23:55,996 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:55,996 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(1482): Serving as 2dff3a36d44f,34839,1733743435627, RpcServer on 2dff3a36d44f/172.17.0.3:34839, sessionid=0x1012aecef2d0001 2024-12-09T11:23:55,997 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T11:23:55,997 DEBUG [RS:0;2dff3a36d44f:34839 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2dff3a36d44f,34839,1733743435627 2024-12-09T11:23:55,997 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,34839,1733743435627' 2024-12-09T11:23:55,997 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T11:23:55,997 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T11:23:55,998 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T11:23:55,998 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T11:23:55,998 DEBUG [RS:0;2dff3a36d44f:34839 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2dff3a36d44f,34839,1733743435627 2024-12-09T11:23:55,998 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,34839,1733743435627' 2024-12-09T11:23:55,998 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T11:23:55,998 DEBUG 
[RS:0;2dff3a36d44f:34839 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T11:23:55,998 DEBUG [RS:0;2dff3a36d44f:34839 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T11:23:55,998 INFO [RS:0;2dff3a36d44f:34839 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T11:23:55,998 INFO [RS:0;2dff3a36d44f:34839 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T11:23:56,100 INFO [RS:0;2dff3a36d44f:34839 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C34839%2C1733743435627, suffix=, logDir=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627, archiveDir=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/oldWALs, maxLogs=32 2024-12-09T11:23:56,101 INFO [RS:0;2dff3a36d44f:34839 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C34839%2C1733743435627.1733743436101 2024-12-09T11:23:56,108 INFO [RS:0;2dff3a36d44f:34839 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 2024-12-09T11:23:56,109 DEBUG [RS:0;2dff3a36d44f:34839 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34469:34469),(127.0.0.1/127.0.0.1:42817:42817)] 2024-12-09T11:23:56,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T11:23:56,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:23:56,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T11:23:56,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:23:56,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:23:56,278 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T11:23:56,279 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd 2024-12-09T11:23:56,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44301 is added to blk_1073741833_1009 (size=32) 2024-12-09T11:23:56,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741833_1009 (size=32) 2024-12-09T11:23:56,289 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:56,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:23:56,291 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, 
compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:23:56,291 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:56,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:56,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:23:56,293 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:23:56,293 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:56,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:56,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:23:56,295 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:23:56,295 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:56,295 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:56,295 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:23:56,296 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:23:56,296 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:56,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:56,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:23:56,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740 2024-12-09T11:23:56,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740 2024-12-09T11:23:56,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:23:56,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:23:56,300 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
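Side note (not part of the captured log): the FlushLargeStoresPolicy line above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, so the policy falls back to flushSize divided by the number of families (16 MB for hbase:meta). A minimal sketch of setting that bound and the overall flush size on a Configuration, with illustrative values only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushPolicyTuning {
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            // Overall per-region memstore flush threshold (128 MB here).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // Lower bound for flushing individual column families under
            // FlushLargeStoresPolicy; unset in the test run logged above.
            conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
            return conf;
        }
    }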
2024-12-09T11:23:56,301 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:23:56,302 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:23:56,303 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696694, jitterRate=-0.11410872638225555}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:23:56,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733743436289Initializing all the Stores at 1733743436290 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743436290Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743436290Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743436290Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743436290Cleaning up temporary data from old regions at 1733743436299 (+9 ms)Region opened successfully at 1733743436304 (+5 ms) 2024-12-09T11:23:56,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:23:56,304 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:23:56,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:23:56,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:23:56,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:23:56,304 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:23:56,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743436304Disabling compacts and flushes for region at 1733743436304Disabling writes for close at 1733743436304Writing region close 
event to WAL at 1733743436304Closed at 1733743436304 2024-12-09T11:23:56,305 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:23:56,305 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T11:23:56,306 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T11:23:56,307 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:23:56,308 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T11:23:56,458 DEBUG [2dff3a36d44f:38387 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T11:23:56,459 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2dff3a36d44f,34839,1733743435627 2024-12-09T11:23:56,461 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,34839,1733743435627, state=OPENING 2024-12-09T11:23:56,463 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T11:23:56,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:56,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:23:56,467 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:23:56,467 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,34839,1733743435627}] 2024-12-09T11:23:56,467 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:23:56,467 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:23:56,621 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:23:56,623 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47299, version=3.0.0-beta-2-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:23:56,626 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T11:23:56,627 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:23:56,628 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C34839%2C1733743435627.meta, suffix=.meta, logDir=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627, archiveDir=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/oldWALs, maxLogs=32 2024-12-09T11:23:56,629 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C34839%2C1733743435627.meta.1733743436629.meta 2024-12-09T11:23:56,634 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.meta.1733743436629.meta 2024-12-09T11:23:56,634 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42817:42817),(127.0.0.1/127.0.0.1:34469:34469)] 2024-12-09T11:23:56,635 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:23:56,635 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T11:23:56,635 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T11:23:56,635 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
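Side note (not part of the captured log): the WAL configuration entries above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) are derived from a handful of region server settings. A sketch with the commonly used key names, which should be treated as assumptions rather than values read off this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollTuning {
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            // WAL block size; the log above reports blocksize=256 MB.
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            // Roll the WAL once it reaches blocksize * multiplier (0.5 -> rollsize=128 MB).
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // Maximum number of WAL files kept before flushes are forced (maxLogs=32).
            conf.setInt("hbase.regionserver.maxlogs", 32);
            return conf;
        }
    }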
2024-12-09T11:23:56,635 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T11:23:56,635 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:56,636 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T11:23:56,636 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T11:23:56,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:23:56,638 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:23:56,638 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:56,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:56,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:23:56,639 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:23:56,639 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:56,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:56,640 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:23:56,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:23:56,640 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:56,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:23:56,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:23:56,641 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:23:56,641 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:56,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
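Side note (not part of the captured log): the CompactionConfiguration dumps above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter) map onto the standard compaction settings. A sketch with the usual key names, given here as assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            // Minimum/maximum number of store files selected for a minor compaction.
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Selection ratios for normal and off-peak hours.
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            // Major compaction period (7 days) and jitter, as reported above.
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
            return conf;
        }
    }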
2024-12-09T11:23:56,642 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:23:56,642 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740 2024-12-09T11:23:56,643 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740 2024-12-09T11:23:56,644 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:23:56,645 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:23:56,645 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T11:23:56,646 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:23:56,647 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711627, jitterRate=-0.0951198935508728}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:23:56,647 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T11:23:56,648 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733743436636Writing region info on filesystem at 1733743436636Initializing all the Stores at 1733743436636Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743436636Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743436637 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743436637Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743436637Cleaning up temporary data from old regions at 1733743436645 (+8 ms)Running coprocessor post-open hooks at 1733743436647 (+2 ms)Region opened successfully at 1733743436648 (+1 ms) 2024-12-09T11:23:56,649 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733743436621 2024-12-09T11:23:56,651 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T11:23:56,651 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T11:23:56,652 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2dff3a36d44f,34839,1733743435627 2024-12-09T11:23:56,653 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,34839,1733743435627, state=OPEN 2024-12-09T11:23:56,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:23:56,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:23:56,659 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2dff3a36d44f,34839,1733743435627 2024-12-09T11:23:56,659 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:23:56,659 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:23:56,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T11:23:56,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,34839,1733743435627 in 192 msec 2024-12-09T11:23:56,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T11:23:56,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 356 msec 2024-12-09T11:23:56,665 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:23:56,665 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T11:23:56,667 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:23:56,667 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,34839,1733743435627, seqNum=-1] 2024-12-09T11:23:56,667 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:23:56,668 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35831, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:23:56,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 837 msec 2024-12-09T11:23:56,674 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733743436674, completionTime=-1 2024-12-09T11:23:56,674 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T11:23:56,674 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T11:23:56,676 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T11:23:56,676 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733743496676 2024-12-09T11:23:56,676 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733743556676 2024-12-09T11:23:56,676 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-09T11:23:56,676 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,38387,1733743435551-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:56,676 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,38387,1733743435551-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:56,676 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,38387,1733743435551-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:56,676 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2dff3a36d44f:38387, period=300000, unit=MILLISECONDS is enabled. 
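Side note (not part of the captured log): once the master reports that it has joined the cluster and counted its region servers (entries above), the same state is visible through the client Admin API. A short sketch, assuming a reachable quorum configured via hbase-site.xml on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStateCheck {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                // Matches the log above: one active master, one registered region server.
                System.out.println("master  = " + metrics.getMasterName());
                System.out.println("servers = " + metrics.getLiveServerMetrics().keySet());
            }
        }
    }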
2024-12-09T11:23:56,676 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:56,677 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T11:23:56,678 DEBUG [master/2dff3a36d44f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T11:23:56,680 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.980sec 2024-12-09T11:23:56,680 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T11:23:56,680 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T11:23:56,680 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T11:23:56,680 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T11:23:56,680 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T11:23:56,680 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,38387,1733743435551-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:23:56,680 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,38387,1733743435551-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T11:23:56,683 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T11:23:56,683 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T11:23:56,683 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,38387,1733743435551-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
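Side note (not part of the captured log): the master lines above show quota support and the hbase:slowlog system table disabled for this test run. A sketch of the switches that would turn those features on; both key names are assumptions made for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class OptionalMasterFeatures {
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            // Enables the master/region server quota managers ("Quota support disabled" above).
            conf.setBoolean("hbase.quota.enabled", true);
            // Persists slow/large RPC records to the hbase:slowlog system table
            // ("Slow/Large requests logging to system table hbase:slowlog is disabled" above).
            conf.setBoolean("hbase.regionserver.slowlog.systable.enabled", true);
            return conf;
        }
    }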
2024-12-09T11:23:56,684 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a8866d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:23:56,684 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2dff3a36d44f,38387,-1 for getting cluster id 2024-12-09T11:23:56,684 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:23:56,686 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8d90c011-af45-4960-989b-0eb8d65a1344' 2024-12-09T11:23:56,686 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:23:56,686 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8d90c011-af45-4960-989b-0eb8d65a1344" 2024-12-09T11:23:56,687 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74503d8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:23:56,687 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2dff3a36d44f,38387,-1] 2024-12-09T11:23:56,687 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:23:56,687 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:23:56,688 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48276, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:23:56,689 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ecb97cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:23:56,690 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:23:56,691 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,34839,1733743435627, seqNum=-1] 2024-12-09T11:23:56,691 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:23:56,693 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35228, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:23:56,695 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2dff3a36d44f,38387,1733743435551 2024-12-09T11:23:56,695 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:23:56,699 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T11:23:56,699 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-09T11:23:56,699 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-09T11:23:56,699 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T11:23:56,700 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 2dff3a36d44f,38387,1733743435551 2024-12-09T11:23:56,700 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1001e7ea 2024-12-09T11:23:56,701 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T11:23:56,707 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48292, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T11:23:56,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38387 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T11:23:56,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38387 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
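The two TableDescriptorChecker warnings above appear because this test deliberately creates its table with a tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that flushes, rolls, and splits happen quickly. For reference only, a hedged client-side sketch (not taken from the test source) of building an equivalent descriptor with the standard hbase-client builder API; the Admin/Connection wiring is assumed and only indicated in a comment.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SmallFlushTableDescriptor {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)            // matches VERSIONS => '1' in the create call logged below
            .build())
        .setMaxFileSize(786432L)          // value flagged by the MAX_FILESIZE warning above
        .setMemStoreFlushSize(8192L)      // value flagged by the MEMSTORE_FLUSHSIZE warning above
        .build();
    System.out.println(td);
    // admin.createTable(td);  // an Admin obtained from ConnectionFactory.createConnection(conf).getAdmin()
  }
}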
2024-12-09T11:23:56,708 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38387 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:23:56,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38387 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T11:23:56,711 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:23:56,711 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:56,711 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38387 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-09T11:23:56,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38387 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:23:56,712 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:23:56,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741835_1011 (size=395) 2024-12-09T11:23:56,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44301 is added to blk_1073741835_1011 (size=395) 2024-12-09T11:23:56,721 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 990ddf2ec505ddbcb6f79c2362cb9674, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd 2024-12-09T11:23:56,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44301 is added to blk_1073741836_1012 (size=78) 2024-12-09T11:23:56,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40449 is added to blk_1073741836_1012 (size=78) 2024-12-09T11:23:56,728 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:56,728 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 990ddf2ec505ddbcb6f79c2362cb9674, disabling compactions & flushes 2024-12-09T11:23:56,728 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 2024-12-09T11:23:56,728 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 2024-12-09T11:23:56,728 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. after waiting 0 ms 2024-12-09T11:23:56,728 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 2024-12-09T11:23:56,729 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 2024-12-09T11:23:56,729 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 990ddf2ec505ddbcb6f79c2362cb9674: Waiting for close lock at 1733743436728Disabling compacts and flushes for region at 1733743436728Disabling writes for close at 1733743436728Writing region close event to WAL at 1733743436728Closed at 1733743436728 2024-12-09T11:23:56,730 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:23:56,730 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733743436730"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733743436730"}]},"ts":"1733743436730"} 2024-12-09T11:23:56,733 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
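The Put above writes the new region's info:regioninfo and info:state cells into hbase:meta. Purely to show where that row lives, a sketch of reading it back through the ordinary client API; it assumes a reachable cluster and default client configuration, and the row key is the full region name printed in the Put.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadMetaRow {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // Row key = the full region name as printed in the Put above.
      byte[] row = Bytes.toBytes(
          "TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674.");
      Result r = meta.get(new Get(row).addFamily(Bytes.toBytes("info")));
      // info:state is expected to read OPEN once the ASSIGN procedure below completes.
      System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"))));
    }
  }
}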
2024-12-09T11:23:56,734 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:23:56,734 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733743436734"}]},"ts":"1733743436734"} 2024-12-09T11:23:56,736 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-09T11:23:56,736 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=990ddf2ec505ddbcb6f79c2362cb9674, ASSIGN}] 2024-12-09T11:23:56,738 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=990ddf2ec505ddbcb6f79c2362cb9674, ASSIGN 2024-12-09T11:23:56,739 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=990ddf2ec505ddbcb6f79c2362cb9674, ASSIGN; state=OFFLINE, location=2dff3a36d44f,34839,1733743435627; forceNewPlan=false, retain=false 2024-12-09T11:23:56,890 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=990ddf2ec505ddbcb6f79c2362cb9674, regionState=OPENING, regionLocation=2dff3a36d44f,34839,1733743435627 2024-12-09T11:23:56,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=990ddf2ec505ddbcb6f79c2362cb9674, ASSIGN because future has completed 2024-12-09T11:23:56,894 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 990ddf2ec505ddbcb6f79c2362cb9674, server=2dff3a36d44f,34839,1733743435627}] 2024-12-09T11:23:57,053 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 
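The ASSIGN procedure above plans the new region onto 2dff3a36d44f,34839 and dispatches an OpenRegionProcedure to that server. From the client side the resulting placement can be observed with a RegionLocator; a small sketch under the same assumptions as above (running cluster, default client configuration, illustrative row key).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ShowRegionLocation {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))) {
      // Single-region table (STARTKEY => '', ENDKEY => ''), so any row maps to the same region.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row"), true /* reload */);
      // Expected to print the server the OpenRegionProcedure targeted, e.g. 2dff3a36d44f,34839,...
      System.out.println(loc.getServerName());
    }
  }
}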
2024-12-09T11:23:57,054 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 990ddf2ec505ddbcb6f79c2362cb9674, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:23:57,054 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:23:57,054 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:23:57,054 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:23:57,054 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:23:57,055 INFO [StoreOpener-990ddf2ec505ddbcb6f79c2362cb9674-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:23:57,057 INFO [StoreOpener-990ddf2ec505ddbcb6f79c2362cb9674-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 990ddf2ec505ddbcb6f79c2362cb9674 columnFamilyName info 2024-12-09T11:23:57,057 DEBUG [StoreOpener-990ddf2ec505ddbcb6f79c2362cb9674-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:23:57,057 INFO [StoreOpener-990ddf2ec505ddbcb6f79c2362cb9674-1 {}] regionserver.HStore(327): Store=990ddf2ec505ddbcb6f79c2362cb9674/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:23:57,057 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:23:57,058 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/default/TestLogRolling-testLogRollOnPipelineRestart/990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:23:57,058 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/default/TestLogRolling-testLogRollOnPipelineRestart/990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:23:57,059 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:23:57,059 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:23:57,060 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:23:57,063 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/default/TestLogRolling-testLogRollOnPipelineRestart/990ddf2ec505ddbcb6f79c2362cb9674/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:23:57,063 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 990ddf2ec505ddbcb6f79c2362cb9674; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787197, jitterRate=9.730905294418335E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:23:57,063 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:23:57,064 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 990ddf2ec505ddbcb6f79c2362cb9674: Running coprocessor pre-open hook at 1733743437054Writing region info on filesystem at 1733743437054Initializing all the Stores at 1733743437055 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743437055Cleaning up temporary data from old regions at 1733743437059 (+4 ms)Running coprocessor post-open hooks at 1733743437063 (+4 ms)Region opened successfully at 1733743437064 (+1 ms) 2024-12-09T11:23:57,065 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674., pid=6, masterSystemTime=1733743437049 2024-12-09T11:23:57,068 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 2024-12-09T11:23:57,068 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 2024-12-09T11:23:57,069 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=990ddf2ec505ddbcb6f79c2362cb9674, regionState=OPEN, openSeqNum=2, regionLocation=2dff3a36d44f,34839,1733743435627 2024-12-09T11:23:57,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 990ddf2ec505ddbcb6f79c2362cb9674, server=2dff3a36d44f,34839,1733743435627 because future has completed 2024-12-09T11:23:57,076 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T11:23:57,076 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 990ddf2ec505ddbcb6f79c2362cb9674, server=2dff3a36d44f,34839,1733743435627 in 179 msec 2024-12-09T11:23:57,078 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T11:23:57,079 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=990ddf2ec505ddbcb6f79c2362cb9674, ASSIGN in 340 msec 2024-12-09T11:23:57,079 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:23:57,080 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733743437080"}]},"ts":"1733743437080"} 2024-12-09T11:23:57,082 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-09T11:23:57,083 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:23:57,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 375 msec 2024-12-09T11:23:57,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:23:57,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:23:58,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:23:58,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:23:59,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:23:59,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:00,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:00,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:01,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:01,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:01,674 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T11:24:01,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:01,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:01,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:01,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:01,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:01,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:01,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:01,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:01,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:01,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:01,949 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T11:24:01,950 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-09T11:24:02,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:02,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:03,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:03,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:04,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:04,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:05,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:05,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:06,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T11:24:06,171 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T11:24:06,172 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T11:24:06,172 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-09T11:24:06,172 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:24:06,172 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T11:24:06,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:06,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:06,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38387 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:24:06,765 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-09T11:24:06,766 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-09T11:24:06,769 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T11:24:06,769 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 2024-12-09T11:24:06,772 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674., hostname=2dff3a36d44f,34839,1733743435627, seqNum=2] 2024-12-09T11:24:07,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:07,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:08,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:08,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:08,776 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 2024-12-09T11:24:08,776 WARN [ResponseProcessor for block BP-503541886-172.17.0.3-1733743434659:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-503541886-172.17.0.3-1733743434659:blk_1073741832_1008 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:08,777 WARN [ResponseProcessor for block BP-503541886-172.17.0.3-1733743434659:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-503541886-172.17.0.3-1733743434659:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-503541886-172.17.0.3-1733743434659:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:40449,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:08,777 WARN [PacketResponder: BP-503541886-172.17.0.3-1733743434659:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40449] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:08,777 WARN [ResponseProcessor for block BP-503541886-172.17.0.3-1733743434659:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-503541886-172.17.0.3-1733743434659:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-503541886-172.17.0.3-1733743434659:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:40449,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:08,777 WARN [DataStreamer for file /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/WALs/2dff3a36d44f,38387,1733743435551/2dff3a36d44f%2C38387%2C1733743435551.1733743435759 block BP-503541886-172.17.0.3-1733743434659:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-503541886-172.17.0.3-1733743434659:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK], DatanodeInfoWithStorage[127.0.0.1:40449,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40449,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK]) is bad. 2024-12-09T11:24:08,777 WARN [DataStreamer for file /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.meta.1733743436629.meta block BP-503541886-172.17.0.3-1733743434659:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-503541886-172.17.0.3-1733743434659:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK], DatanodeInfoWithStorage[127.0.0.1:40449,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40449,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK]) is bad. 2024-12-09T11:24:08,777 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_388877119_22 at /127.0.0.1:38964 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:44301:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38964 dst: /127.0.0.1:44301 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:08,778 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_388877119_22 at /127.0.0.1:37502 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:40449:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37502 dst: /127.0.0.1:40449 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:24:08,778 WARN [DataStreamer for file /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 block BP-503541886-172.17.0.3-1733743434659:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-503541886-172.17.0.3-1733743434659:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40449,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK], DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40449,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK]) is bad. 2024-12-09T11:24:08,778 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1705327480_22 at /127.0.0.1:37484 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40449:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37484 dst: /127.0.0.1:40449 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:08,778 WARN [PacketResponder: BP-503541886-172.17.0.3-1733743434659:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40449] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:08,779 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1705327480_22 at /127.0.0.1:38942 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44301:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38942 dst: /127.0.0.1:44301 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:08,779 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_388877119_22 at /127.0.0.1:38974 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44301:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38974 dst: /127.0.0.1:44301 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:08,780 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_388877119_22 at /127.0.0.1:37522 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40449:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37522 dst: /127.0.0.1:40449 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:08,791 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2080f13e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:24:08,792 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b1ed5b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:24:08,792 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:24:08,792 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b80746b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:24:08,792 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@683c9ef0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,STOPPED} 2024-12-09T11:24:08,800 WARN [BP-503541886-172.17.0.3-1733743434659 heartbeating to localhost/127.0.0.1:34165 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:24:08,800 WARN [BP-503541886-172.17.0.3-1733743434659 heartbeating to localhost/127.0.0.1:34165 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-503541886-172.17.0.3-1733743434659 (Datanode Uuid 70ace615-e940-44cc-b038-32c5947ae7b5) service to localhost/127.0.0.1:34165 2024-12-09T11:24:08,800 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:24:08,800 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:24:08,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data3/current/BP-503541886-172.17.0.3-1733743434659 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:24:08,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data4/current/BP-503541886-172.17.0.3-1733743434659 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:24:08,801 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:24:08,835 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:24:08,838 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:24:08,839 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:24:08,839 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:24:08,839 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:24:08,839 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@292d948{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:24:08,840 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26bb2a2d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:24:08,966 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7fbde2b7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/java.io.tmpdir/jetty-localhost-40763-hadoop-hdfs-3_4_1-tests_jar-_-any-4235314927577085197/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:24:08,966 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7dc1dfbb{HTTP/1.1, (http/1.1)}{localhost:40763} 2024-12-09T11:24:08,967 INFO [Time-limited test {}] server.Server(415): Started @170240ms 2024-12-09T11:24:08,968 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T11:24:08,987 WARN [ResponseProcessor for block BP-503541886-172.17.0.3-1733743434659:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-503541886-172.17.0.3-1733743434659:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:08,987 WARN [ResponseProcessor for block BP-503541886-172.17.0.3-1733743434659:blk_1073741832_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-503541886-172.17.0.3-1733743434659:blk_1073741832_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:08,987 WARN [ResponseProcessor for block BP-503541886-172.17.0.3-1733743434659:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-503541886-172.17.0.3-1733743434659:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:08,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_388877119_22 at /127.0.0.1:38800 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:44301:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38800 dst: /127.0.0.1:44301 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:08,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_388877119_22 at /127.0.0.1:38796 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44301:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38796 dst: /127.0.0.1:44301 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:08,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1705327480_22 at /127.0.0.1:38792 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44301:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38792 dst: /127.0.0.1:44301 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T11:24:08,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f7e1003{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:24:08,991 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41d89f44{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:24:08,991 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:24:08,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8a2f233{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:24:08,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ad986b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,STOPPED} 2024-12-09T11:24:08,995 WARN [BP-503541886-172.17.0.3-1733743434659 heartbeating to localhost/127.0.0.1:34165 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:24:08,995 WARN [BP-503541886-172.17.0.3-1733743434659 heartbeating to localhost/127.0.0.1:34165 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-503541886-172.17.0.3-1733743434659 (Datanode Uuid 6642d444-cecf-474d-9026-d334659a9cd7) service to localhost/127.0.0.1:34165 2024-12-09T11:24:08,995 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T11:24:08,995 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:24:08,996 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data1/current/BP-503541886-172.17.0.3-1733743434659 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:24:08,996 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data2/current/BP-503541886-172.17.0.3-1733743434659 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:24:08,996 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:24:09,008 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:24:09,011 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:24:09,012 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:24:09,012 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:24:09,012 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:24:09,012 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bae15bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:24:09,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2bd4204e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:24:09,070 WARN [Thread-1340 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T11:24:09,073 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x264ddc28e59d0bbd with lease ID 0xad94f03b2dac8766: from storage DS-7e2caeb4-bdb9-4978-8354-5317be0306ea node DatanodeRegistration(127.0.0.1:40731, datanodeUuid=70ace615-e940-44cc-b038-32c5947ae7b5, infoPort=46125, infoSecurePort=0, ipcPort=46547, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:24:09,073 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x264ddc28e59d0bbd with lease ID 0xad94f03b2dac8766: from storage DS-a0113b89-c40b-46d7-b92d-31369bd2748e node DatanodeRegistration(127.0.0.1:40731, datanodeUuid=70ace615-e940-44cc-b038-32c5947ae7b5, infoPort=46125, infoSecurePort=0, ipcPort=46547, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T11:24:09,147 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e1d853e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/java.io.tmpdir/jetty-localhost-45973-hadoop-hdfs-3_4_1-tests_jar-_-any-1061246685876363191/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:24:09,148 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32d80742{HTTP/1.1, (http/1.1)}{localhost:45973} 2024-12-09T11:24:09,148 INFO [Time-limited test {}] server.Server(415): Started @170421ms 2024-12-09T11:24:09,149 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T11:24:09,237 WARN [Thread-1371 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T11:24:09,240 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd402ea939018f713 with lease ID 0xad94f03b2dac8767: from storage DS-65b26615-0165-4436-8104-1fa8ecfc7d52 node DatanodeRegistration(127.0.0.1:41909, datanodeUuid=6642d444-cecf-474d-9026-d334659a9cd7, infoPort=41955, infoSecurePort=0, ipcPort=44101, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:24:09,240 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd402ea939018f713 with lease ID 0xad94f03b2dac8767: from storage DS-d2b16396-1fe5-45af-8ae0-976a250c901c node DatanodeRegistration(127.0.0.1:41909, datanodeUuid=6642d444-cecf-474d-9026-d334659a9cd7, infoPort=41955, infoSecurePort=0, ipcPort=44101, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:24:09,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:09,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:10,171 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-09T11:24:10,174 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-09T11:24:10,176 ERROR [FSHLog-0-hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd-prefix:2dff3a36d44f,34839,1733743435627 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:24:10,176 WARN [FSHLog-0-hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd-prefix:2dff3a36d44f,34839,1733743435627 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:10,176 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C34839%2C1733743435627:(num 1733743436101) roll requested 2024-12-09T11:24:10,177 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C34839%2C1733743435627.1733743450176 2024-12-09T11:24:10,183 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 newFile=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 2024-12-09T11:24:10,183 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:10,183 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:10,183 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:10,184 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:10,184 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:10,184 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 2024-12-09T11:24:10,184 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:24:10,184 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:10,185 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 2024-12-09T11:24:10,185 WARN [IPC Server handler 1 on default port 34165 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741832_1015 2024-12-09T11:24:10,185 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 after 0ms 2024-12-09T11:24:10,187 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41955:41955),(127.0.0.1/127.0.0.1:46125:46125)] 2024-12-09T11:24:10,187 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 is not closed yet, will try archiving it next time 2024-12-09T11:24:10,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:10,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:11,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:11,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:12,191 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-09T11:24:12,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:12,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:13,073 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741832_1015: GenerationStamp not matched, existing replica is blk_1073741832_1008 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T11:24:13,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:13,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:14,186 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 after 4001ms 2024-12-09T11:24:14,193 WARN [ResponseProcessor for block BP-503541886-172.17.0.3-1733743434659:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-503541886-172.17.0.3-1733743434659:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:14,194 WARN [DataStreamer for file /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 block BP-503541886-172.17.0.3-1733743434659:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-503541886-172.17.0.3-1733743434659:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41909,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK], DatanodeInfoWithStorage[127.0.0.1:40731,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41909,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]) is bad. 
2024-12-09T11:24:14,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_388877119_22 at /127.0.0.1:43142 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41909:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43142 dst: /127.0.0.1:41909 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:14,195 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_388877119_22 at /127.0.0.1:57142 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40731:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57142 dst: /127.0.0.1:40731 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:14,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e1d853e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:24:14,198 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32d80742{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:24:14,198 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:24:14,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2bd4204e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:24:14,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bae15bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,STOPPED} 2024-12-09T11:24:14,199 WARN [BP-503541886-172.17.0.3-1733743434659 heartbeating to localhost/127.0.0.1:34165 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:24:14,199 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:24:14,199 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:24:14,199 WARN [BP-503541886-172.17.0.3-1733743434659 heartbeating to localhost/127.0.0.1:34165 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-503541886-172.17.0.3-1733743434659 (Datanode Uuid 6642d444-cecf-474d-9026-d334659a9cd7) service to localhost/127.0.0.1:34165 2024-12-09T11:24:14,200 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data1/current/BP-503541886-172.17.0.3-1733743434659 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:24:14,201 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:24:14,201 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data2/current/BP-503541886-172.17.0.3-1733743434659 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:24:14,209 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:24:14,213 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:24:14,214 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:24:14,214 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:24:14,214 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:24:14,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68b5b1a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:24:14,215 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@527fc9f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:24:14,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:14,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:14,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@714162c1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/java.io.tmpdir/jetty-localhost-42731-hadoop-hdfs-3_4_1-tests_jar-_-any-10565537530077684718/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:24:14,338 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@686acba3{HTTP/1.1, (http/1.1)}{localhost:42731} 2024-12-09T11:24:14,338 INFO [Time-limited test {}] server.Server(415): Started @175611ms 2024-12-09T11:24:14,340 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:24:14,371 WARN [ResponseProcessor for block BP-503541886-172.17.0.3-1733743434659:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-503541886-172.17.0.3-1733743434659:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:14,371 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_388877119_22 at /127.0.0.1:57168 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:40731:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57168 dst: /127.0.0.1:40731 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:14,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7fbde2b7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:24:14,384 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7dc1dfbb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:24:14,384 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:24:14,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26bb2a2d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:24:14,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@292d948{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,STOPPED} 2024-12-09T11:24:14,387 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:24:14,387 WARN [BP-503541886-172.17.0.3-1733743434659 heartbeating to localhost/127.0.0.1:34165 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:24:14,387 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:24:14,387 WARN [BP-503541886-172.17.0.3-1733743434659 heartbeating to localhost/127.0.0.1:34165 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-503541886-172.17.0.3-1733743434659 (Datanode Uuid 70ace615-e940-44cc-b038-32c5947ae7b5) service to localhost/127.0.0.1:34165 2024-12-09T11:24:14,388 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data3/current/BP-503541886-172.17.0.3-1733743434659 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:24:14,388 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data4/current/BP-503541886-172.17.0.3-1733743434659 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:24:14,437 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:24:14,448 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:24:14,451 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:24:14,454 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:24:14,454 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:24:14,455 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:24:14,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c9c9504{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:24:14,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28735816{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:24:14,501 WARN [Thread-1414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:24:14,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ccb69bca0619621 with lease ID 0xad94f03b2dac8768: from storage DS-65b26615-0165-4436-8104-1fa8ecfc7d52 node DatanodeRegistration(127.0.0.1:45225, datanodeUuid=6642d444-cecf-474d-9026-d334659a9cd7, infoPort=45681, infoSecurePort=0, ipcPort=44645, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:24:14,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ccb69bca0619621 with lease ID 0xad94f03b2dac8768: from storage DS-d2b16396-1fe5-45af-8ae0-976a250c901c node DatanodeRegistration(127.0.0.1:45225, datanodeUuid=6642d444-cecf-474d-9026-d334659a9cd7, infoPort=45681, infoSecurePort=0, ipcPort=44645, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:24:14,579 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@558f8fa7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/java.io.tmpdir/jetty-localhost-35725-hadoop-hdfs-3_4_1-tests_jar-_-any-1791944391307251941/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:24:14,579 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@63150acd{HTTP/1.1, (http/1.1)}{localhost:35725} 2024-12-09T11:24:14,579 INFO [Time-limited test {}] server.Server(415): Started @175853ms 2024-12-09T11:24:14,581 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:24:14,672 WARN [Thread-1445 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:24:14,675 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3c95e755b9d02f0 with lease ID 0xad94f03b2dac8769: from storage DS-7e2caeb4-bdb9-4978-8354-5317be0306ea node DatanodeRegistration(127.0.0.1:39615, datanodeUuid=70ace615-e940-44cc-b038-32c5947ae7b5, infoPort=45833, infoSecurePort=0, ipcPort=33417, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:24:14,675 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3c95e755b9d02f0 with lease ID 0xad94f03b2dac8769: from storage DS-a0113b89-c40b-46d7-b92d-31369bd2748e node DatanodeRegistration(127.0.0.1:39615, datanodeUuid=70ace615-e940-44cc-b038-32c5947ae7b5, infoPort=45833, infoSecurePort=0, ipcPort=33417, storageInfo=lv=-57;cid=testClusterID;nsid=61773062;c=1733743434659), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:24:15,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:15,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:15,599 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-09T11:24:15,601 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-09T11:24:15,603 ERROR [FSHLog-0-hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd-prefix:2dff3a36d44f,34839,1733743435627 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40731,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:24:15,603 WARN [FSHLog-0-hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd-prefix:2dff3a36d44f,34839,1733743435627 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40731,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:15,603 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C34839%2C1733743435627:(num 1733743450176) roll requested 2024-12-09T11:24:15,603 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C34839%2C1733743435627.1733743455603 2024-12-09T11:24:15,610 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 newFile=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743455603 2024-12-09T11:24:15,610 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:15,610 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:15,610 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:15,610 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:15,610 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:15,611 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743455603 2024-12-09T11:24:15,611 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40731,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:24:15,611 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40731,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:15,611 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 2024-12-09T11:24:15,611 WARN [IPC Server handler 0 on default port 34165 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-09T11:24:15,612 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 after 1ms 2024-12-09T11:24:15,614 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45833:45833),(127.0.0.1/127.0.0.1:45681:45681)] 2024-12-09T11:24:15,614 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 is not closed yet, will try archiving it next time 2024-12-09T11:24:15,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741837_1020 (size=2427) 2024-12-09T11:24:16,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:16,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:16,504 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T11:24:17,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:17,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:17,615 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C34839%2C1733743435627.1733743457615 2024-12-09T11:24:17,621 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743455603 newFile=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 2024-12-09T11:24:17,622 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:17,622 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:17,622 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:17,622 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:17,622 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:17,622 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743455603 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 2024-12-09T11:24:17,623 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45833:45833),(127.0.0.1/127.0.0.1:45681:45681)] 2024-12-09T11:24:17,623 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 is not closed yet, will try archiving it next time 2024-12-09T11:24:17,623 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743455603 is not closed yet, will try archiving it next time 2024-12-09T11:24:17,624 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 2024-12-09T11:24:17,624 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 2024-12-09T11:24:17,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741838_1019 (size=1264) 2024-12-09T11:24:17,624 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 after 0ms 2024-12-09T11:24:17,624 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 2024-12-09T11:24:17,625 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741838_1019 (size=1264) 2024-12-09T11:24:17,625 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 is not closed yet, will try archiving it next time 2024-12-09T11:24:17,634 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733743437064/Put/vlen=218/seqid=0] 2024-12-09T11:24:17,634 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733743446774/Put/vlen=1045/seqid=0] 2024-12-09T11:24:17,634 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743436101 2024-12-09T11:24:17,634 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 2024-12-09T11:24:17,635 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 2024-12-09T11:24:17,635 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 after 0ms 2024-12-09T11:24:17,635 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 2024-12-09T11:24:17,639 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733743450176/Put/vlen=1045/seqid=0] 2024-12-09T11:24:17,639 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733743452192/Put/vlen=1045/seqid=0] 2024-12-09T11:24:17,639 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 2024-12-09T11:24:17,639 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743455603 2024-12-09T11:24:17,639 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743455603 2024-12-09T11:24:17,640 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743455603 after 1ms 2024-12-09T11:24:17,640 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743455603 2024-12-09T11:24:17,643 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733743455602/Put/vlen=1045/seqid=0] 2024-12-09T11:24:17,643 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 2024-12-09T11:24:17,643 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 2024-12-09T11:24:17,643 WARN [IPC Server handler 3 on default port 34165 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-09T11:24:17,643 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 after 0ms 2024-12-09T11:24:18,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:18,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:18,508 WARN [ResponseProcessor for block BP-503541886-172.17.0.3-1733743434659:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-503541886-172.17.0.3-1733743434659:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:24:18,508 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1705327480_22 at /127.0.0.1:33394 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45225:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33394 dst: /127.0.0.1:45225 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:18,508 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1705327480_22 at /127.0.0.1:44394 [Receiving block BP-503541886-172.17.0.3-1733743434659:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44394 dst: /127.0.0.1:39615 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:39615 remote=/127.0.0.1:44394]. Total timeout mills is 60000, 59114 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:18,508 WARN [DataStreamer for file /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 block BP-503541886-172.17.0.3-1733743434659:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-503541886-172.17.0.3-1733743434659:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39615,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK], DatanodeInfoWithStorage[127.0.0.1:45225,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39615,DS-7e2caeb4-bdb9-4978-8354-5317be0306ea,DISK]) is bad. 2024-12-09T11:24:18,509 WARN [DataStreamer for file /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 block BP-503541886-172.17.0.3-1733743434659:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-503541886-172.17.0.3-1733743434659:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:18,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741839_1022 (size=85) 2024-12-09T11:24:19,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:19,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:19,613 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743450176 after 4002ms 2024-12-09T11:24:20,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:20,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:21,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:21,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:21,644 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 after 4001ms 2024-12-09T11:24:21,644 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 2024-12-09T11:24:21,648 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 2024-12-09T11:24:21,648 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-09T11:24:21,649 ERROR [FSHLog-0-hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd-prefix:2dff3a36d44f,34839,1733743435627.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:21,649 WARN [FSHLog-0-hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd-prefix:2dff3a36d44f,34839,1733743435627.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:21,649 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C34839%2C1733743435627.meta:.meta(num 1733743436629) roll requested 2024-12-09T11:24:21,650 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C34839%2C1733743435627.meta.1733743461649.meta 2024-12-09T11:24:21,655 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:21,655 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:21,655 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:21,655 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:21,655 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:21,655 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.meta.1733743436629.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.meta.1733743461649.meta 2024-12-09T11:24:21,656 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:21,656 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:21,656 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.meta.1733743436629.meta 2024-12-09T11:24:21,656 WARN [IPC Server handler 1 on default port 34165 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.meta.1733743436629.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-12-09T11:24:21,656 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.meta.1733743436629.meta after 0ms 2024-12-09T11:24:21,660 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45833:45833),(127.0.0.1/127.0.0.1:45681:45681)] 2024-12-09T11:24:21,660 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.meta.1733743436629.meta is not closed yet, will try archiving it next time 2024-12-09T11:24:21,677 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/.tmp/info/da98570c3b35458bad7b07e814d5e6ee is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674./info:regioninfo/1733743437068/Put/seqid=0 2024-12-09T11:24:21,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741841_1025 (size=7125) 2024-12-09T11:24:21,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741841_1025 (size=7125) 2024-12-09T11:24:21,693 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/.tmp/info/da98570c3b35458bad7b07e814d5e6ee 2024-12-09T11:24:21,720 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/.tmp/ns/259065c9f1d542b9b976486dd70a1d7f is 43, key is default/ns:d/1733743436669/Put/seqid=0 2024-12-09T11:24:21,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39615 is added to blk_1073741842_1026 (size=5153) 2024-12-09T11:24:21,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741842_1026 (size=5153) 2024-12-09T11:24:22,127 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/.tmp/ns/259065c9f1d542b9b976486dd70a1d7f 2024-12-09T11:24:22,153 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/.tmp/table/9d57379b956d4aa7bc492645f74efa25 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733743437080/Put/seqid=0 2024-12-09T11:24:22,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741843_1027 (size=5438) 2024-12-09T11:24:22,189 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/.tmp/table/9d57379b956d4aa7bc492645f74efa25 2024-12-09T11:24:22,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741843_1027 (size=5438) 2024-12-09T11:24:22,203 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/.tmp/info/da98570c3b35458bad7b07e814d5e6ee as hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/info/da98570c3b35458bad7b07e814d5e6ee 2024-12-09T11:24:22,211 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/info/da98570c3b35458bad7b07e814d5e6ee, entries=10, sequenceid=11, filesize=7.0 K 2024-12-09T11:24:22,212 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/.tmp/ns/259065c9f1d542b9b976486dd70a1d7f as hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/ns/259065c9f1d542b9b976486dd70a1d7f 2024-12-09T11:24:22,223 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/ns/259065c9f1d542b9b976486dd70a1d7f, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T11:24:22,225 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/.tmp/table/9d57379b956d4aa7bc492645f74efa25 as hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/table/9d57379b956d4aa7bc492645f74efa25 2024-12-09T11:24:22,233 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/table/9d57379b956d4aa7bc492645f74efa25, entries=2, sequenceid=11, filesize=5.3 K 2024-12-09T11:24:22,239 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 591ms, sequenceid=11, compaction requested=false 2024-12-09T11:24:22,239 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T11:24:22,239 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 990ddf2ec505ddbcb6f79c2362cb9674 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-09T11:24:22,242 ERROR [FSHLog-0-hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd-prefix:2dff3a36d44f,34839,1733743435627 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-503541886-172.17.0.3-1733743434659:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:22,243 WARN [FSHLog-0-hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd-prefix:2dff3a36d44f,34839,1733743435627 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-503541886-172.17.0.3-1733743434659:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:22,243 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C34839%2C1733743435627:(num 1733743457615) roll requested 2024-12-09T11:24:22,244 INFO [regionserver/2dff3a36d44f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C34839%2C1733743435627.1733743462243 2024-12-09T11:24:22,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:22,269 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 newFile=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743462243 2024-12-09T11:24:22,269 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:22,270 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:22,270 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:22,270 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:22,270 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:22,271 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743462243 2024-12-09T11:24:22,271 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-503541886-172.17.0.3-1733743434659:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:22,271 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-503541886-172.17.0.3-1733743434659:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:22,272 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 2024-12-09T11:24:22,273 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 after 1ms 2024-12-09T11:24:22,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:22,279 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 to hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/oldWALs/2dff3a36d44f%2C34839%2C1733743435627.1733743457615 2024-12-09T11:24:22,279 DEBUG [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45681:45681),(127.0.0.1/127.0.0.1:45833:45833)] 2024-12-09T11:24:22,305 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/default/TestLogRolling-testLogRollOnPipelineRestart/990ddf2ec505ddbcb6f79c2362cb9674/.tmp/info/3339b6cc2afa4b92b54347b238c01c6f is 1080, key is row1002/info:/1733743446774/Put/seqid=0 2024-12-09T11:24:22,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741845_1029 (size=9270) 2024-12-09T11:24:22,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741845_1029 (size=9270) 2024-12-09T11:24:22,321 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/default/TestLogRolling-testLogRollOnPipelineRestart/990ddf2ec505ddbcb6f79c2362cb9674/.tmp/info/3339b6cc2afa4b92b54347b238c01c6f 2024-12-09T11:24:22,344 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/default/TestLogRolling-testLogRollOnPipelineRestart/990ddf2ec505ddbcb6f79c2362cb9674/.tmp/info/3339b6cc2afa4b92b54347b238c01c6f as hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/default/TestLogRolling-testLogRollOnPipelineRestart/990ddf2ec505ddbcb6f79c2362cb9674/info/3339b6cc2afa4b92b54347b238c01c6f 2024-12-09T11:24:22,353 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/default/TestLogRolling-testLogRollOnPipelineRestart/990ddf2ec505ddbcb6f79c2362cb9674/info/3339b6cc2afa4b92b54347b238c01c6f, entries=4, sequenceid=8, filesize=9.1 K 2024-12-09T11:24:22,358 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 990ddf2ec505ddbcb6f79c2362cb9674 in 119ms, sequenceid=8, compaction requested=false 2024-12-09T11:24:22,358 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 990ddf2ec505ddbcb6f79c2362cb9674: 2024-12-09T11:24:22,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T11:24:22,364 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T11:24:22,364 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:24:22,365 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:24:22,365 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T11:24:22,365 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:24:22,365 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T11:24:22,365 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2001403329, stopped=false 2024-12-09T11:24:22,365 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2dff3a36d44f,38387,1733743435551 2024-12-09T11:24:22,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:24:22,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:22,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:24:22,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:22,368 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:24:22,368 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:24:22,368 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:24:22,368 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T11:24:22,368 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:24:22,368 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:24:22,369 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '2dff3a36d44f,34839,1733743435627' ***** 2024-12-09T11:24:22,369 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:24:22,369 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:24:22,369 INFO [RS:0;2dff3a36d44f:34839 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:24:22,369 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:24:22,369 INFO [RS:0;2dff3a36d44f:34839 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:24:22,369 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(3091): Received CLOSE for 990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:24:22,370 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(959): stopping server 2dff3a36d44f,34839,1733743435627 2024-12-09T11:24:22,370 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:24:22,370 INFO [RS:0;2dff3a36d44f:34839 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2dff3a36d44f:34839. 2024-12-09T11:24:22,370 DEBUG [RS:0;2dff3a36d44f:34839 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:24:22,370 DEBUG [RS:0;2dff3a36d44f:34839 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:24:22,370 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:24:22,370 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 990ddf2ec505ddbcb6f79c2362cb9674, disabling compactions & flushes 2024-12-09T11:24:22,370 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:24:22,370 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T11:24:22,370 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 2024-12-09T11:24:22,370 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T11:24:22,370 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 2024-12-09T11:24:22,370 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. after waiting 0 ms 2024-12-09T11:24:22,370 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 2024-12-09T11:24:22,374 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T11:24:22,374 DEBUG [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 990ddf2ec505ddbcb6f79c2362cb9674=TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674.} 2024-12-09T11:24:22,374 DEBUG [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 990ddf2ec505ddbcb6f79c2362cb9674 2024-12-09T11:24:22,378 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:24:22,378 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:24:22,379 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:24:22,379 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:24:22,379 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:24:22,382 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/default/TestLogRolling-testLogRollOnPipelineRestart/990ddf2ec505ddbcb6f79c2362cb9674/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-09T11:24:22,383 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 
2024-12-09T11:24:22,383 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 990ddf2ec505ddbcb6f79c2362cb9674: Waiting for close lock at 1733743462370Running coprocessor pre-close hooks at 1733743462370Disabling compacts and flushes for region at 1733743462370Disabling writes for close at 1733743462370Writing region close event to WAL at 1733743462375 (+5 ms)Running coprocessor post-close hooks at 1733743462382 (+7 ms)Closed at 1733743462382 2024-12-09T11:24:22,383 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733743436708.990ddf2ec505ddbcb6f79c2362cb9674. 2024-12-09T11:24:22,395 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T11:24:22,395 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:24:22,395 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:24:22,395 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743462378Running coprocessor pre-close hooks at 1733743462378Disabling compacts and flushes for region at 1733743462378Disabling writes for close at 1733743462379 (+1 ms)Writing region close event to WAL at 1733743462390 (+11 ms)Running coprocessor post-close hooks at 1733743462395 (+5 ms)Closed at 1733743462395 2024-12-09T11:24:22,396 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T11:24:22,575 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(976): stopping server 2dff3a36d44f,34839,1733743435627; all regions closed. 
2024-12-09T11:24:22,575 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:22,575 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:22,576 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:22,576 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:22,576 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:22,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741840_1023 (size=825) 2024-12-09T11:24:22,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741840_1023 (size=825) 2024-12-09T11:24:22,978 INFO [regionserver/2dff3a36d44f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T11:24:22,979 INFO [regionserver/2dff3a36d44f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T11:24:23,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:23,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:23,966 INFO [regionserver/2dff3a36d44f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:24:24,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:24,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:25,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:25,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:25,530 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-09T11:24:25,657 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.meta.1733743436629.meta after 4001ms 2024-12-09T11:24:25,658 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/WALs/2dff3a36d44f,34839,1733743435627/2dff3a36d44f%2C34839%2C1733743435627.meta.1733743436629.meta to hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/oldWALs/2dff3a36d44f%2C34839%2C1733743435627.meta.1733743436629.meta 2024-12-09T11:24:25,661 DEBUG [RS:0;2dff3a36d44f:34839 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/oldWALs 2024-12-09T11:24:25,661 INFO [RS:0;2dff3a36d44f:34839 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C34839%2C1733743435627.meta:.meta(num 1733743461649) 2024-12-09T11:24:25,661 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,661 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,662 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,663 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,663 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741844_1028 (size=1162) 2024-12-09T11:24:25,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741844_1028 (size=1162) 2024-12-09T11:24:25,671 DEBUG [RS:0;2dff3a36d44f:34839 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/oldWALs 2024-12-09T11:24:25,671 INFO [RS:0;2dff3a36d44f:34839 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C34839%2C1733743435627:(num 1733743462243) 2024-12-09T11:24:25,671 DEBUG [RS:0;2dff3a36d44f:34839 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:24:25,671 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:24:25,671 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:24:25,671 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.ChoreService(370): Chore service for: regionserver/2dff3a36d44f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T11:24:25,671 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:24:25,671 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T11:24:25,672 INFO [RS:0;2dff3a36d44f:34839 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34839 2024-12-09T11:24:25,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:24:25,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2dff3a36d44f,34839,1733743435627 2024-12-09T11:24:25,674 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:24:25,675 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2dff3a36d44f,34839,1733743435627] 2024-12-09T11:24:25,676 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T11:24:25,678 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2dff3a36d44f,34839,1733743435627 already deleted, retry=false 2024-12-09T11:24:25,678 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2dff3a36d44f,34839,1733743435627 expired; onlineServers=0 2024-12-09T11:24:25,678 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2dff3a36d44f,38387,1733743435551' ***** 2024-12-09T11:24:25,678 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T11:24:25,678 INFO [M:0;2dff3a36d44f:38387 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:24:25,678 INFO [M:0;2dff3a36d44f:38387 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:24:25,679 DEBUG [M:0;2dff3a36d44f:38387 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T11:24:25,679 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T11:24:25,679 DEBUG [M:0;2dff3a36d44f:38387 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T11:24:25,679 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743435846 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743435846,5,FailOnTimeoutGroup] 2024-12-09T11:24:25,679 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743435842 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743435842,5,FailOnTimeoutGroup] 2024-12-09T11:24:25,679 INFO [M:0;2dff3a36d44f:38387 {}] hbase.ChoreService(370): Chore service for: master/2dff3a36d44f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T11:24:25,679 INFO [M:0;2dff3a36d44f:38387 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:24:25,679 DEBUG [M:0;2dff3a36d44f:38387 {}] master.HMaster(1795): Stopping service threads 2024-12-09T11:24:25,679 INFO [M:0;2dff3a36d44f:38387 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T11:24:25,679 INFO [M:0;2dff3a36d44f:38387 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:24:25,679 INFO [M:0;2dff3a36d44f:38387 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T11:24:25,680 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T11:24:25,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T11:24:25,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:25,681 DEBUG [M:0;2dff3a36d44f:38387 {}] zookeeper.ZKUtil(347): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T11:24:25,681 WARN [M:0;2dff3a36d44f:38387 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T11:24:25,682 INFO [M:0;2dff3a36d44f:38387 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/.lastflushedseqids 2024-12-09T11:24:25,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741846_1030 (size=120) 2024-12-09T11:24:25,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741846_1030 (size=120) 2024-12-09T11:24:25,698 INFO [M:0;2dff3a36d44f:38387 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T11:24:25,698 INFO [M:0;2dff3a36d44f:38387 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T11:24:25,698 DEBUG [M:0;2dff3a36d44f:38387 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:24:25,698 INFO [M:0;2dff3a36d44f:38387 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:24:25,698 DEBUG [M:0;2dff3a36d44f:38387 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:24:25,698 DEBUG [M:0;2dff3a36d44f:38387 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:24:25,698 DEBUG [M:0;2dff3a36d44f:38387 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:24:25,699 INFO [M:0;2dff3a36d44f:38387 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-12-09T11:24:25,699 ERROR [FSHLog-0-hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData-prefix:2dff3a36d44f,38387,1733743435551 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:25,699 WARN [FSHLog-0-hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData-prefix:2dff3a36d44f,38387,1733743435551 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T11:24:25,699 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 2dff3a36d44f%2C38387%2C1733743435551:(num 1733743435759) roll requested 2024-12-09T11:24:25,699 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C38387%2C1733743435551.1733743465699 2024-12-09T11:24:25,714 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,714 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,714 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,714 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,714 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,715 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/WALs/2dff3a36d44f,38387,1733743435551/2dff3a36d44f%2C38387%2C1733743435551.1733743435759 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/WALs/2dff3a36d44f,38387,1733743435551/2dff3a36d44f%2C38387%2C1733743435551.1733743465699 2024-12-09T11:24:25,722 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:25,723 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44301,DS-65b26615-0165-4436-8104-1fa8ecfc7d52,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T11:24:25,723 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/WALs/2dff3a36d44f,38387,1733743435551/2dff3a36d44f%2C38387%2C1733743435551.1733743435759 2024-12-09T11:24:25,724 WARN [IPC Server handler 2 on default port 34165 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/WALs/2dff3a36d44f,38387,1733743435551/2dff3a36d44f%2C38387%2C1733743435551.1733743435759 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-12-09T11:24:25,724 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/WALs/2dff3a36d44f,38387,1733743435551/2dff3a36d44f%2C38387%2C1733743435551.1733743435759 after 1ms 2024-12-09T11:24:25,730 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45681:45681),(127.0.0.1/127.0.0.1:45833:45833)] 2024-12-09T11:24:25,730 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/WALs/2dff3a36d44f,38387,1733743435551/2dff3a36d44f%2C38387%2C1733743435551.1733743435759 is not closed yet, will try archiving it next time 2024-12-09T11:24:25,747 DEBUG [M:0;2dff3a36d44f:38387 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/905773da238044c58268f459661d4279 is 82, key is hbase:meta,,1/info:regioninfo/1733743436652/Put/seqid=0 2024-12-09T11:24:25,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741848_1033 (size=5672) 2024-12-09T11:24:25,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741848_1033 (size=5672) 2024-12-09T11:24:25,755 INFO [M:0;2dff3a36d44f:38387 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/905773da238044c58268f459661d4279 2024-12-09T11:24:25,776 DEBUG [M:0;2dff3a36d44f:38387 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c7818c8fadec4896a948ab8f5997f80d is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733743437084/Put/seqid=0 2024-12-09T11:24:25,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:24:25,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34839-0x1012aecef2d0001, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:24:25,778 INFO [RS:0;2dff3a36d44f:34839 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:24:25,778 INFO [RS:0;2dff3a36d44f:34839 {}] regionserver.HRegionServer(1031): Exiting; stopping=2dff3a36d44f,34839,1733743435627; zookeeper connection closed. 
2024-12-09T11:24:25,782 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@520f8971 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@520f8971 2024-12-09T11:24:25,783 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T11:24:25,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741849_1034 (size=6117) 2024-12-09T11:24:25,793 INFO [M:0;2dff3a36d44f:38387 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c7818c8fadec4896a948ab8f5997f80d 2024-12-09T11:24:25,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741849_1034 (size=6117) 2024-12-09T11:24:25,819 DEBUG [M:0;2dff3a36d44f:38387 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/08a7292ee0744b328ed803711a652218 is 69, key is 2dff3a36d44f,34839,1733743435627/rs:state/1733743435930/Put/seqid=0 2024-12-09T11:24:25,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741850_1035 (size=5156) 2024-12-09T11:24:25,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741850_1035 (size=5156) 2024-12-09T11:24:25,824 INFO [M:0;2dff3a36d44f:38387 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/08a7292ee0744b328ed803711a652218 2024-12-09T11:24:25,846 DEBUG [M:0;2dff3a36d44f:38387 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fa9a995c508b4140a268686a35eaf56a is 52, key is load_balancer_on/state:d/1733743436697/Put/seqid=0 2024-12-09T11:24:25,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741851_1036 (size=5056) 2024-12-09T11:24:25,856 INFO [M:0;2dff3a36d44f:38387 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fa9a995c508b4140a268686a35eaf56a 2024-12-09T11:24:25,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741851_1036 (size=5056) 2024-12-09T11:24:25,863 DEBUG [M:0;2dff3a36d44f:38387 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/905773da238044c58268f459661d4279 as 
hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/905773da238044c58268f459661d4279 2024-12-09T11:24:25,869 INFO [M:0;2dff3a36d44f:38387 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/905773da238044c58268f459661d4279, entries=8, sequenceid=56, filesize=5.5 K 2024-12-09T11:24:25,869 DEBUG [M:0;2dff3a36d44f:38387 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c7818c8fadec4896a948ab8f5997f80d as hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c7818c8fadec4896a948ab8f5997f80d 2024-12-09T11:24:25,882 INFO [M:0;2dff3a36d44f:38387 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c7818c8fadec4896a948ab8f5997f80d, entries=6, sequenceid=56, filesize=6.0 K 2024-12-09T11:24:25,883 DEBUG [M:0;2dff3a36d44f:38387 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/08a7292ee0744b328ed803711a652218 as hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/08a7292ee0744b328ed803711a652218 2024-12-09T11:24:25,888 INFO [M:0;2dff3a36d44f:38387 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/08a7292ee0744b328ed803711a652218, entries=1, sequenceid=56, filesize=5.0 K 2024-12-09T11:24:25,889 DEBUG [M:0;2dff3a36d44f:38387 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fa9a995c508b4140a268686a35eaf56a as hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fa9a995c508b4140a268686a35eaf56a 2024-12-09T11:24:25,894 INFO [M:0;2dff3a36d44f:38387 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fa9a995c508b4140a268686a35eaf56a, entries=1, sequenceid=56, filesize=4.9 K 2024-12-09T11:24:25,898 INFO [M:0;2dff3a36d44f:38387 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 200ms, sequenceid=56, compaction requested=false 2024-12-09T11:24:25,904 INFO [M:0;2dff3a36d44f:38387 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T11:24:25,904 DEBUG [M:0;2dff3a36d44f:38387 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743465698Disabling compacts and flushes for region at 1733743465698Disabling writes for close at 1733743465698Obtaining lock to block concurrent updates at 1733743465699 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733743465699Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1733743465699Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733743465730 (+31 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733743465730Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733743465746 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733743465746Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733743465760 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733743465776 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733743465776Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733743465800 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733743465818 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733743465818Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733743465830 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733743465845 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733743465845Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e2a9ffa: reopening flushed file at 1733743465862 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2cfd0eb0: reopening flushed file at 1733743465869 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46955ed1: reopening flushed file at 1733743465882 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5eb55b0f: reopening flushed file at 1733743465888 (+6 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 200ms, sequenceid=56, compaction requested=false at 1733743465898 (+10 ms)Writing region close event to WAL at 1733743465904 (+6 ms)Closed at 1733743465904 2024-12-09T11:24:25,904 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,904 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,905 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,905 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,905 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:24:25,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39615 is added to blk_1073741847_1031 (size=757) 2024-12-09T11:24:25,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45225 is added to blk_1073741847_1031 (size=757) 2024-12-09T11:24:26,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:24:26,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T11:24:26,172 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T11:24:26,172 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T11:24:26,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:26,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:27,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:27,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:27,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,909 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T11:24:27,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:27,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:28,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:28,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:28,676 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T11:24:29,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:29,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:29,725 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/WALs/2dff3a36d44f,38387,1733743435551/2dff3a36d44f%2C38387%2C1733743435551.1733743435759 after 4002ms 2024-12-09T11:24:29,726 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/WALs/2dff3a36d44f,38387,1733743435551/2dff3a36d44f%2C38387%2C1733743435551.1733743435759 to hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/oldWALs/2dff3a36d44f%2C38387%2C1733743435551.1733743435759 2024-12-09T11:24:29,728 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/MasterData/oldWALs/2dff3a36d44f%2C38387%2C1733743435551.1733743435759 to hdfs://localhost:34165/user/jenkins/test-data/4e3ff876-d697-3495-dabc-71adc3ef22bd/oldWALs/2dff3a36d44f%2C38387%2C1733743435551.1733743435759$masterlocalwal$ 2024-12-09T11:24:29,729 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T11:24:29,729 INFO [M:0;2dff3a36d44f:38387 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T11:24:29,729 INFO [M:0;2dff3a36d44f:38387 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38387 2024-12-09T11:24:29,729 INFO [M:0;2dff3a36d44f:38387 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:24:29,831 INFO [M:0;2dff3a36d44f:38387 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:24:29,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:24:29,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38387-0x1012aecef2d0000, quorum=127.0.0.1:53398, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:24:29,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@558f8fa7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:24:29,836 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@63150acd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:24:29,836 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:24:29,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28735816{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:24:29,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c9c9504{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,STOPPED} 2024-12-09T11:24:29,839 WARN [BP-503541886-172.17.0.3-1733743434659 heartbeating to localhost/127.0.0.1:34165 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:24:29,839 WARN [BP-503541886-172.17.0.3-1733743434659 heartbeating to localhost/127.0.0.1:34165 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-503541886-172.17.0.3-1733743434659 (Datanode Uuid 70ace615-e940-44cc-b038-32c5947ae7b5) service to localhost/127.0.0.1:34165 2024-12-09T11:24:29,839 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:24:29,839 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:24:29,840 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data3/current/BP-503541886-172.17.0.3-1733743434659 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:24:29,840 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data4/current/BP-503541886-172.17.0.3-1733743434659 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:24:29,841 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:24:29,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@714162c1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:24:29,849 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@686acba3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:24:29,849 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:24:29,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@527fc9f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:24:29,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68b5b1a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,STOPPED} 2024-12-09T11:24:29,861 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:24:29,861 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:24:29,862 WARN [BP-503541886-172.17.0.3-1733743434659 heartbeating to localhost/127.0.0.1:34165 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:24:29,862 WARN [BP-503541886-172.17.0.3-1733743434659 heartbeating to localhost/127.0.0.1:34165 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-503541886-172.17.0.3-1733743434659 (Datanode Uuid 6642d444-cecf-474d-9026-d334659a9cd7) service to localhost/127.0.0.1:34165 2024-12-09T11:24:29,863 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data1/current/BP-503541886-172.17.0.3-1733743434659 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:24:29,863 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/cluster_d49bd5fa-e57a-431f-45d4-fa260de257de/data/data2/current/BP-503541886-172.17.0.3-1733743434659 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:24:29,863 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:24:29,871 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@531c70ff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:24:29,872 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e129549{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:24:29,872 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:24:29,872 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a073962{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:24:29,872 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@445db56c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir/,STOPPED} 2024-12-09T11:24:29,883 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T11:24:29,904 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T11:24:29,915 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 154) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:34165 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:34165 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34165 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:34165 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34165 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34165 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34165 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:34165 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 448) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=371 (was 417), ProcessCount=11 (was 11), AvailableMemoryMB=1246 (was 1324) 2024-12-09T11:24:29,924 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=371, ProcessCount=11, AvailableMemoryMB=1246 2024-12-09T11:24:29,924 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T11:24:29,924 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.log.dir so I do NOT create it in target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1 2024-12-09T11:24:29,924 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dfa088f3-5e00-c74b-78b5-ff6b86cec274/hadoop.tmp.dir so I do NOT create it in target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1 2024-12-09T11:24:29,924 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/cluster_9dd52bfc-e6ac-fbe4-5558-cc5d09018284, deleteOnExit=true 2024-12-09T11:24:29,924 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/test.cache.data in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/hadoop.log.dir in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T11:24:29,925 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:24:29,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T11:24:29,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/nfs.dump.dir in system properties and HBase conf 2024-12-09T11:24:29,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/java.io.tmpdir in system properties and HBase conf 2024-12-09T11:24:29,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:24:29,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T11:24:29,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T11:24:29,938 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:24:30,011 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:24:30,014 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:24:30,015 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:24:30,015 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:24:30,016 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:24:30,017 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:24:30,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@602005e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:24:30,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21aba41a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:24:30,134 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1411d2ea{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/java.io.tmpdir/jetty-localhost-45503-hadoop-hdfs-3_4_1-tests_jar-_-any-15802364380619764370/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:24:30,134 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@67fc8227{HTTP/1.1, (http/1.1)}{localhost:45503} 2024-12-09T11:24:30,134 INFO [Time-limited test {}] server.Server(415): Started @191407ms 2024-12-09T11:24:30,147 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:24:30,202 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:24:30,206 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:24:30,214 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:24:30,214 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:24:30,214 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:24:30,215 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@239cfce9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:24:30,215 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22d895dc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:24:30,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:30,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:30,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@50c25230{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/java.io.tmpdir/jetty-localhost-39967-hadoop-hdfs-3_4_1-tests_jar-_-any-16530062025505098306/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:24:30,339 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1736cbce{HTTP/1.1, (http/1.1)}{localhost:39967} 2024-12-09T11:24:30,339 INFO [Time-limited test {}] server.Server(415): Started @191612ms 2024-12-09T11:24:30,341 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:24:30,371 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:24:30,374 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:24:30,375 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:24:30,375 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:24:30,375 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:24:30,376 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@351a06a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:24:30,376 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5301759b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:24:30,457 WARN [Thread-1639 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/cluster_9dd52bfc-e6ac-fbe4-5558-cc5d09018284/data/data1/current/BP-1691064892-172.17.0.3-1733743469955/current, will proceed with Du for space computation calculation, 2024-12-09T11:24:30,458 WARN [Thread-1640 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/cluster_9dd52bfc-e6ac-fbe4-5558-cc5d09018284/data/data2/current/BP-1691064892-172.17.0.3-1733743469955/current, will proceed with Du for space computation calculation, 2024-12-09T11:24:30,489 WARN [Thread-1618 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:24:30,492 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5cee5f5fa1685286 with lease ID 0x10298743158d934f: Processing first storage report for DS-eb1b2779-3727-4f98-8667-4776fe94d9c1 from datanode DatanodeRegistration(127.0.0.1:43519, datanodeUuid=743c9a7a-e6d2-43e0-bfb6-98be5996fd9c, infoPort=34049, infoSecurePort=0, ipcPort=40409, storageInfo=lv=-57;cid=testClusterID;nsid=1905805054;c=1733743469955) 2024-12-09T11:24:30,492 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5cee5f5fa1685286 with lease ID 0x10298743158d934f: from storage DS-eb1b2779-3727-4f98-8667-4776fe94d9c1 node DatanodeRegistration(127.0.0.1:43519, datanodeUuid=743c9a7a-e6d2-43e0-bfb6-98be5996fd9c, infoPort=34049, infoSecurePort=0, ipcPort=40409, storageInfo=lv=-57;cid=testClusterID;nsid=1905805054;c=1733743469955), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:24:30,492 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5cee5f5fa1685286 with lease ID 0x10298743158d934f: Processing first storage report for DS-b6758a01-f70e-42ec-ba8e-89341ff3e5e8 from datanode DatanodeRegistration(127.0.0.1:43519, datanodeUuid=743c9a7a-e6d2-43e0-bfb6-98be5996fd9c, infoPort=34049, infoSecurePort=0, ipcPort=40409, storageInfo=lv=-57;cid=testClusterID;nsid=1905805054;c=1733743469955) 2024-12-09T11:24:30,492 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5cee5f5fa1685286 with lease ID 0x10298743158d934f: from storage DS-b6758a01-f70e-42ec-ba8e-89341ff3e5e8 node DatanodeRegistration(127.0.0.1:43519, datanodeUuid=743c9a7a-e6d2-43e0-bfb6-98be5996fd9c, infoPort=34049, infoSecurePort=0, ipcPort=40409, storageInfo=lv=-57;cid=testClusterID;nsid=1905805054;c=1733743469955), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:24:30,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@796941bb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/java.io.tmpdir/jetty-localhost-35507-hadoop-hdfs-3_4_1-tests_jar-_-any-7768513487967673507/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:24:30,514 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a125e15{HTTP/1.1, (http/1.1)}{localhost:35507} 2024-12-09T11:24:30,514 INFO [Time-limited test {}] server.Server(415): Started @191787ms 2024-12-09T11:24:30,515 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
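The records above show HBaseTestingUtil bringing up a fresh mini cluster for regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling: one master, one region server, two datanodes and a mini ZooKeeper, per the StartMiniClusterOption logged at the start of this run. As a rough sketch (the option values come from the log; the surrounding test scaffolding is assumed and is not taken from TestLogRolling itself), a test typically drives this with something like:

    // Sketch only: class/field layout is assumed; the option values
    // (numMasters=1, numRegionServers=1, numDataNodes=2) mirror the StartMiniClusterOption logged above.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // one HMaster, as logged
            .numRegionServers(1)  // one MiniHBaseClusterRegionServer
            .numDataNodes(2)      // two HDFS datanodes (the data1..data4 directories above)
            .build();
        util.startMiniCluster(option);   // starts DFS, mini ZooKeeper and HBase, producing records like those above
        try {
          // test logic would go here, e.g. creating tables via util.getAdmin()
        } finally {
          util.shutdownMiniCluster();    // teardown; the ResourceChecker thread/file-descriptor report runs around this
        }
      }
    }

The ResourceChecker output earlier in this section ("Thread LEAK?", "OpenFileDescriptor LEAK?") is produced around exactly this startup/shutdown pair for the previous test method.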
2024-12-09T11:24:30,615 WARN [Thread-1665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/cluster_9dd52bfc-e6ac-fbe4-5558-cc5d09018284/data/data3/current/BP-1691064892-172.17.0.3-1733743469955/current, will proceed with Du for space computation calculation, 2024-12-09T11:24:30,615 WARN [Thread-1666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/cluster_9dd52bfc-e6ac-fbe4-5558-cc5d09018284/data/data4/current/BP-1691064892-172.17.0.3-1733743469955/current, will proceed with Du for space computation calculation, 2024-12-09T11:24:30,632 WARN [Thread-1654 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T11:24:30,634 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4f8f8c007fff6b33 with lease ID 0x10298743158d9350: Processing first storage report for DS-836ffd37-3b73-45f5-b5a9-53ee83708e19 from datanode DatanodeRegistration(127.0.0.1:36551, datanodeUuid=68172ff3-9d70-454e-9f25-f60cc4fa0820, infoPort=44369, infoSecurePort=0, ipcPort=38161, storageInfo=lv=-57;cid=testClusterID;nsid=1905805054;c=1733743469955) 2024-12-09T11:24:30,634 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f8f8c007fff6b33 with lease ID 0x10298743158d9350: from storage DS-836ffd37-3b73-45f5-b5a9-53ee83708e19 node DatanodeRegistration(127.0.0.1:36551, datanodeUuid=68172ff3-9d70-454e-9f25-f60cc4fa0820, infoPort=44369, infoSecurePort=0, ipcPort=38161, storageInfo=lv=-57;cid=testClusterID;nsid=1905805054;c=1733743469955), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:24:30,634 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4f8f8c007fff6b33 with lease ID 0x10298743158d9350: Processing first storage report for DS-f67db169-efab-462d-84cc-3675be69c39b from datanode DatanodeRegistration(127.0.0.1:36551, datanodeUuid=68172ff3-9d70-454e-9f25-f60cc4fa0820, infoPort=44369, infoSecurePort=0, ipcPort=38161, storageInfo=lv=-57;cid=testClusterID;nsid=1905805054;c=1733743469955) 2024-12-09T11:24:30,634 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f8f8c007fff6b33 with lease ID 0x10298743158d9350: from storage DS-f67db169-efab-462d-84cc-3675be69c39b node DatanodeRegistration(127.0.0.1:36551, datanodeUuid=68172ff3-9d70-454e-9f25-f60cc4fa0820, infoPort=44369, infoSecurePort=0, ipcPort=38161, storageInfo=lv=-57;cid=testClusterID;nsid=1905805054;c=1733743469955), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:24:30,645 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1 2024-12-09T11:24:30,647 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/cluster_9dd52bfc-e6ac-fbe4-5558-cc5d09018284/zookeeper_0, clientPort=52369, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/cluster_9dd52bfc-e6ac-fbe4-5558-cc5d09018284/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/cluster_9dd52bfc-e6ac-fbe4-5558-cc5d09018284/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T11:24:30,648 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52369 2024-12-09T11:24:30,648 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:24:30,650 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:24:30,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:24:30,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:24:30,658 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8 with version=8 2024-12-09T11:24:30,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/hbase-staging 2024-12-09T11:24:30,660 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:24:30,660 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:24:30,660 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:24:30,660 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:24:30,660 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:24:30,660 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:24:30,660 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T11:24:30,661 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:24:30,661 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42711 2024-12-09T11:24:30,662 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42711 connecting to ZooKeeper ensemble=127.0.0.1:52369 2024-12-09T11:24:30,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:427110x0, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:24:30,670 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42711-0x1012aed78570000 connected 2024-12-09T11:24:30,686 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:24:30,687 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:24:30,689 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:24:30,689 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8, hbase.cluster.distributed=false 2024-12-09T11:24:30,690 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:24:30,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42711 2024-12-09T11:24:30,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42711 2024-12-09T11:24:30,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42711 2024-12-09T11:24:30,692 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42711 2024-12-09T11:24:30,692 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42711 2024-12-09T11:24:30,706 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:24:30,706 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:24:30,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:24:30,707 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:24:30,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:24:30,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:24:30,707 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T11:24:30,707 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:24:30,707 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34951 2024-12-09T11:24:30,708 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34951 connecting to ZooKeeper ensemble=127.0.0.1:52369 2024-12-09T11:24:30,709 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:24:30,710 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:24:30,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:349510x0, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:24:30,714 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:349510x0, quorum=127.0.0.1:52369, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:24:30,714 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34951-0x1012aed78570001 connected 2024-12-09T11:24:30,715 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T11:24:30,715 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T11:24:30,716 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T11:24:30,716 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:24:30,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34951 2024-12-09T11:24:30,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34951 2024-12-09T11:24:30,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34951 2024-12-09T11:24:30,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34951 2024-12-09T11:24:30,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34951 2024-12-09T11:24:30,731 
DEBUG [M:0;2dff3a36d44f:42711 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2dff3a36d44f:42711 2024-12-09T11:24:30,731 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2dff3a36d44f,42711,1733743470660 2024-12-09T11:24:30,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:24:30,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:24:30,735 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2dff3a36d44f,42711,1733743470660 2024-12-09T11:24:30,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T11:24:30,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:30,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:30,738 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T11:24:30,741 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2dff3a36d44f,42711,1733743470660 from backup master directory 2024-12-09T11:24:30,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2dff3a36d44f,42711,1733743470660 2024-12-09T11:24:30,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:24:30,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:24:30,744 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
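Both the master (bound to 172.17.0.3:42711) and the region server (34951) above register with the mini ZooKeeper ensemble at 127.0.0.1:52369. A client-side sketch of reaching the same cluster follows; the quorum and client port come from this run's log, while the client code itself is illustrative and not part of the test:

    // Sketch only: the ZooKeeper quorum (127.0.0.1) and client port (52369) are taken from
    // the MiniZooKeeperCluster/RecoverableZooKeeper records above; everything else is illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 52369); // clientPort logged by MiniZooKeeperCluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          for (TableName tn : admin.listTableNames()) {  // simple liveness check against the mini cluster
            System.out.println(tn);
          }
        }
      }
    }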
2024-12-09T11:24:30,744 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2dff3a36d44f,42711,1733743470660 2024-12-09T11:24:30,749 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/hbase.id] with ID: c1d0c6eb-0c59-4e2d-8a4d-6e64727cfd29 2024-12-09T11:24:30,749 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/.tmp/hbase.id 2024-12-09T11:24:30,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:24:30,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:24:30,760 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/.tmp/hbase.id]:[hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/hbase.id] 2024-12-09T11:24:30,770 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:24:30,771 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T11:24:30,772 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
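The hbase.id records above show the cluster ID being written to a temporary location under .tmp and only then moved to its final name. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API (the helper name, root directory and cluster ID are placeholders, not the run's values):

    // Sketch of the write-to-.tmp-then-rename pattern used for hbase.id above.
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ClusterIdSketch {
      static void publishClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
        Path target = new Path(rootDir, "hbase.id");
        Path tmp = new Path(new Path(rootDir, ".tmp"), "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8)); // write the full contents to the temporary file first
        }
        // the file only appears under its final name once it is complete,
        // so readers never observe a half-written hbase.id
        if (!fs.rename(tmp, target)) {
          throw new IOException("could not move " + tmp + " to " + target);
        }
      }
    }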
2024-12-09T11:24:30,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:30,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:30,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:24:30,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:24:30,789 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:24:30,790 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T11:24:30,795 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:24:30,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:24:30,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:24:30,805 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store 2024-12-09T11:24:30,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:24:30,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:24:30,822 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:24:30,822 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:24:30,823 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:24:30,823 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:24:30,823 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:24:30,823 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:24:30,823 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
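The master:store descriptor logged above spells out each column family's settings; for 'info' that is three versions, ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, in-memory caching and 8 KB blocks. A sketch of how an equivalent family could be declared through the public client builder API (standard HBase client calls, not code from this test or from MasterRegion):

    // Sketch only: reproduces the 'info' family settings from the master:store descriptor above.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MasterStoreDescriptorSketch {
      static TableDescriptor infoFamilyLikeMasterStore() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(info)
            .build();
      }
    }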
2024-12-09T11:24:30,823 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743470822Disabling compacts and flushes for region at 1733743470822Disabling writes for close at 1733743470823 (+1 ms)Writing region close event to WAL at 1733743470823Closed at 1733743470823 2024-12-09T11:24:30,824 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/.initializing 2024-12-09T11:24:30,824 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/WALs/2dff3a36d44f,42711,1733743470660 2024-12-09T11:24:30,827 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C42711%2C1733743470660, suffix=, logDir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/WALs/2dff3a36d44f,42711,1733743470660, archiveDir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/oldWALs, maxLogs=10 2024-12-09T11:24:30,827 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C42711%2C1733743470660.1733743470827 2024-12-09T11:24:30,835 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/WALs/2dff3a36d44f,42711,1733743470660/2dff3a36d44f%2C42711%2C1733743470660.1733743470827 2024-12-09T11:24:30,836 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44369:44369),(127.0.0.1/127.0.0.1:34049:34049)] 2024-12-09T11:24:30,837 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:24:30,837 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:24:30,837 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:24:30,837 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:24:30,838 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:24:30,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T11:24:30,840 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:30,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:24:30,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:24:30,841 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T11:24:30,841 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:30,841 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:24:30,841 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:24:30,842 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T11:24:30,842 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:30,843 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:24:30,843 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:24:30,844 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T11:24:30,844 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:30,844 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:24:30,844 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:24:30,845 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:24:30,845 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:24:30,846 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:24:30,846 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:24:30,847 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T11:24:30,848 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:24:30,850 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:24:30,850 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=875913, jitterRate=0.11378206312656403}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T11:24:30,851 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733743470837Initializing all the Stores at 1733743470838 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743470838Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743470838Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743470838Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743470838Cleaning up temporary data from old regions at 1733743470846 (+8 ms)Region opened successfully at 1733743470851 (+5 ms) 2024-12-09T11:24:30,851 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T11:24:30,854 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41400a82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:24:30,855 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T11:24:30,855 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T11:24:30,855 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T11:24:30,855 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T11:24:30,856 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T11:24:30,856 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T11:24:30,856 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T11:24:30,860 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T11:24:30,861 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T11:24:30,862 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T11:24:30,862 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T11:24:30,863 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T11:24:30,864 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T11:24:30,865 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T11:24:30,866 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T11:24:30,868 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T11:24:30,869 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T11:24:30,871 DEBUG 
[master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T11:24:30,873 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T11:24:30,874 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T11:24:30,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:24:30,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:24:30,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:30,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:30,877 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2dff3a36d44f,42711,1733743470660, sessionid=0x1012aed78570000, setting cluster-up flag (Was=false) 2024-12-09T11:24:30,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:30,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:30,886 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T11:24:30,887 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,42711,1733743470660 2024-12-09T11:24:30,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:30,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:30,896 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T11:24:30,897 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,42711,1733743470660 2024-12-09T11:24:30,898 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T11:24:30,900 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T11:24:30,900 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T11:24:30,900 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T11:24:30,900 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2dff3a36d44f,42711,1733743470660 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T11:24:30,901 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:24:30,902 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:24:30,902 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:24:30,902 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:24:30,902 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2dff3a36d44f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T11:24:30,902 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:24:30,902 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:24:30,902 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T11:24:30,904 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:24:30,904 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T11:24:30,904 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733743500904 2024-12-09T11:24:30,904 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T11:24:30,904 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T11:24:30,905 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T11:24:30,905 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T11:24:30,905 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T11:24:30,905 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T11:24:30,905 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:30,905 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T11:24:30,906 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:30,909 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T11:24:30,909 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T11:24:30,909 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T11:24:30,910 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T11:24:30,910 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T11:24:30,911 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743470910,5,FailOnTimeoutGroup] 2024-12-09T11:24:30,911 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743470911,5,FailOnTimeoutGroup] 2024-12-09T11:24:30,911 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:30,911 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T11:24:30,911 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:30,911 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
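The CompactionConfiguration entries above (and the similar ones for hbase:meta further down) dump the effective compaction tuning for each column family: minCompactSize 128 MB, maxCompactSize 8 EB, 3-10 files per minor compaction, ratio 1.2 (5.0 off-peak), and a 7-day major-compaction period with 0.5 jitter. As a rough, hedged illustration of where those numbers are configured, the Java sketch below reads the standard HBase compaction keys from a client Configuration; the defaults passed in simply echo the values visible in the log, and an hbase-site.xml on the classpath is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigPeek {
  public static void main(String[] args) {
    // Loads hbase-default.xml plus any hbase-site.xml found on the classpath.
    Configuration conf = HBaseConfiguration.create();

    // Store file size bounds considered for minor compaction
    // (the log shows minCompactSize:128 MB, maxCompactSize:8.00 EB).
    long minCompactSize = conf.getLong("hbase.hstore.compaction.min.size", 134217728L);
    long maxCompactSize = conf.getLong("hbase.hstore.compaction.max.size", Long.MAX_VALUE);

    // File-count bounds for a single compaction
    // (minFilesToCompact:3, maxFilesToCompact:10 in the log).
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);

    // Selection ratios and major-compaction cadence
    // (ratio 1.2, off-peak 5.0, major period 604800000 ms, jitter 0.5).
    float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2F);
    float offPeakRatio = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
    long majorPeriodMs = conf.getLong("hbase.hregion.majorcompaction", 604800000L);
    float majorJitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5F);

    System.out.printf("minCompactSize=%d maxCompactSize=%d files=[%d,%d] ratio=%.1f/%.1f major=%dms jitter=%.1f%n",
        minCompactSize, maxCompactSize, minFiles, maxFiles, ratio, offPeakRatio, majorPeriodMs, majorJitter);
  }
}

The per-family lines in the log are just this same set of settings resolved once per store as each region opens, which is why the block repeats verbatim for info, proc, rs and state.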
2024-12-09T11:24:30,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:24:30,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:24:30,914 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T11:24:30,914 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8 2024-12-09T11:24:30,920 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(746): ClusterId : c1d0c6eb-0c59-4e2d-8a4d-6e64727cfd29 2024-12-09T11:24:30,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:24:30,920 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T11:24:30,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:24:30,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:24:30,922 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column 
family info of region 1588230740 2024-12-09T11:24:30,924 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T11:24:30,924 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T11:24:30,926 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T11:24:30,926 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:24:30,926 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:30,926 DEBUG [RS:0;2dff3a36d44f:34951 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cc6fcb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:24:30,927 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:24:30,927 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:24:30,928 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:24:30,928 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:30,929 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:24:30,929 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:24:30,930 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:24:30,930 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:30,931 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:24:30,931 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:24:30,932 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:24:30,932 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:30,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:24:30,932 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:24:30,933 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740 2024-12-09T11:24:30,933 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740 2024-12-09T11:24:30,935 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:24:30,935 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:24:30,935 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T11:24:30,936 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:24:30,938 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:24:30,939 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811373, jitterRate=0.03171522915363312}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:24:30,939 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733743470921Initializing all the Stores at 1733743470921Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743470921Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743470922 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743470922Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743470922Cleaning up temporary data from old regions at 1733743470935 (+13 ms)Region opened successfully at 1733743470939 (+4 ms) 2024-12-09T11:24:30,939 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:24:30,939 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:24:30,939 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-12-09T11:24:30,939 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:24:30,939 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:24:30,940 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:24:30,940 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743470939Disabling compacts and flushes for region at 1733743470939Disabling writes for close at 1733743470939Writing region close event to WAL at 1733743470940 (+1 ms)Closed at 1733743470940 2024-12-09T11:24:30,940 DEBUG [RS:0;2dff3a36d44f:34951 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2dff3a36d44f:34951 2024-12-09T11:24:30,940 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T11:24:30,940 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T11:24:30,940 DEBUG [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T11:24:30,941 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,42711,1733743470660 with port=34951, startcode=1733743470706 2024-12-09T11:24:30,942 DEBUG [RS:0;2dff3a36d44f:34951 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T11:24:30,942 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:24:30,942 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T11:24:30,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T11:24:30,943 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:24:30,945 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T11:24:30,947 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33417, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T11:24:30,948 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42711 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2dff3a36d44f,34951,1733743470706 2024-12-09T11:24:30,948 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42711 {}] master.ServerManager(517): Registering regionserver=2dff3a36d44f,34951,1733743470706 2024-12-09T11:24:30,949 DEBUG [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8 2024-12-09T11:24:30,949 DEBUG [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33389 2024-12-09T11:24:30,949 DEBUG [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T11:24:30,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:24:30,953 DEBUG [RS:0;2dff3a36d44f:34951 {}] zookeeper.ZKUtil(111): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2dff3a36d44f,34951,1733743470706 2024-12-09T11:24:30,953 WARN [RS:0;2dff3a36d44f:34951 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T11:24:30,953 INFO [RS:0;2dff3a36d44f:34951 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:24:30,953 DEBUG [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706 2024-12-09T11:24:30,955 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2dff3a36d44f,34951,1733743470706] 2024-12-09T11:24:30,958 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T11:24:30,960 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T11:24:30,961 INFO [RS:0;2dff3a36d44f:34951 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:24:30,961 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:30,961 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T11:24:30,962 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T11:24:30,962 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
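The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M for this test regionserver. The sketch below is only a hedged illustration of how such a pair is typically derived from the global memstore fraction, its lower-limit fraction, and the per-region flush size; the key names are the standard HBase memstore sizing keys, and the heap size used is an arbitrary example rather than the test JVM's actual heap.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitPeek {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Fraction of the regionserver heap that all memstores together may use,
    // and the lower-limit fraction of that budget below which forced flushing stops.
    float globalFraction = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4F);
    float lowerLimitFraction =
        conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95F);

    // Per-region flush threshold (the log shows flushSize=134217728, i.e. 128 MB).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);

    // Example heap size only; the 880 M / 836 M pair in the log comes from the
    // actual test JVM heap, which is not shown here.
    long heapBytes = 2L * 1024 * 1024 * 1024;
    long upper = (long) (heapBytes * globalFraction);
    long lower = (long) (upper * lowerLimitFraction);

    System.out.printf("flushSize=%d globalUpper=%d globalLower=%d%n", flushSize, upper, lower);
  }
}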
2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:24:30,962 DEBUG [RS:0;2dff3a36d44f:34951 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:24:30,963 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:30,963 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:30,963 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:30,963 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
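The ExecutorService entries above list the regionserver handler pools with their corePoolSize/maxPoolSize values (RS_OPEN_REGION 1/1, RS_SNAPSHOT_OPERATIONS 3/3, and so on). Purely to illustrate what those two parameters mean for a fixed-size pool, and not as the actual org.apache.hadoop.hbase.executor.ExecutorService implementation, the JDK sketch below builds an equivalent ThreadPoolExecutor.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class FixedPoolSketch {
  // Builds a pool analogous to a log entry such as
  // "name=RS_SNAPSHOT_OPERATIONS-..., corePoolSize=3, maxPoolSize=3":
  // with core == max and an unbounded queue, the pool never grows past its
  // core threads and queued work simply waits for a free thread.
  static ThreadPoolExecutor fixedPool(int corePoolSize, int maxPoolSize) {
    return new ThreadPoolExecutor(
        corePoolSize, maxPoolSize,
        60L, TimeUnit.SECONDS,          // keep-alive for any threads above core
        new LinkedBlockingQueue<>());   // unbounded queue of pending handlers
  }

  public static void main(String[] args) throws InterruptedException {
    ThreadPoolExecutor snapshotOps = fixedPool(3, 3);
    for (int i = 0; i < 5; i++) {
      int task = i;
      snapshotOps.execute(() -> System.out.println("running task " + task));
    }
    snapshotOps.shutdown();
    snapshotOps.awaitTermination(10, TimeUnit.SECONDS);
  }
}

Keeping core and max equal, as most of the pools listed here do, trades burst capacity for predictable thread counts, which is why only the replay and snapshot/flush pools are sized above 1.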
2024-12-09T11:24:30,963 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:30,963 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,34951,1733743470706-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:24:30,982 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T11:24:30,982 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,34951,1733743470706-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:30,983 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:30,983 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.Replication(171): 2dff3a36d44f,34951,1733743470706 started 2024-12-09T11:24:30,998 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:30,998 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(1482): Serving as 2dff3a36d44f,34951,1733743470706, RpcServer on 2dff3a36d44f/172.17.0.3:34951, sessionid=0x1012aed78570001 2024-12-09T11:24:30,998 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T11:24:30,999 DEBUG [RS:0;2dff3a36d44f:34951 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2dff3a36d44f,34951,1733743470706 2024-12-09T11:24:30,999 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,34951,1733743470706' 2024-12-09T11:24:30,999 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T11:24:30,999 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T11:24:30,999 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T11:24:31,000 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T11:24:31,000 DEBUG [RS:0;2dff3a36d44f:34951 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2dff3a36d44f,34951,1733743470706 2024-12-09T11:24:31,000 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,34951,1733743470706' 2024-12-09T11:24:31,000 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T11:24:31,000 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T11:24:31,000 DEBUG [RS:0;2dff3a36d44f:34951 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T11:24:31,000 INFO [RS:0;2dff3a36d44f:34951 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T11:24:31,000 INFO [RS:0;2dff3a36d44f:34951 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-09T11:24:31,095 WARN [2dff3a36d44f:42711 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T11:24:31,102 INFO [RS:0;2dff3a36d44f:34951 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C34951%2C1733743470706, suffix=, logDir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706, archiveDir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/oldWALs, maxLogs=32 2024-12-09T11:24:31,103 INFO [RS:0;2dff3a36d44f:34951 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C34951%2C1733743470706.1733743471103 2024-12-09T11:24:31,108 INFO [RS:0;2dff3a36d44f:34951 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706/2dff3a36d44f%2C34951%2C1733743470706.1733743471103 2024-12-09T11:24:31,109 DEBUG [RS:0;2dff3a36d44f:34951 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34049:34049),(127.0.0.1/127.0.0.1:44369:44369)] 2024-12-09T11:24:31,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:31,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:31,345 DEBUG [2dff3a36d44f:42711 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T11:24:31,346 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2dff3a36d44f,34951,1733743470706 2024-12-09T11:24:31,347 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,34951,1733743470706, state=OPENING 2024-12-09T11:24:31,349 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T11:24:31,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:31,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:24:31,351 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:24:31,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:24:31,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:24:31,351 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,34951,1733743470706}] 2024-12-09T11:24:31,504 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:24:31,506 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59541, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:24:31,510 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T11:24:31,510 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:24:31,512 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C34951%2C1733743470706.meta, suffix=.meta, logDir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706, archiveDir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/oldWALs, maxLogs=32 2024-12-09T11:24:31,512 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C34951%2C1733743470706.meta.1733743471512.meta 2024-12-09T11:24:31,518 INFO 
[RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706/2dff3a36d44f%2C34951%2C1733743470706.meta.1733743471512.meta 2024-12-09T11:24:31,525 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34049:34049),(127.0.0.1/127.0.0.1:44369:44369)] 2024-12-09T11:24:31,526 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:24:31,527 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T11:24:31,527 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T11:24:31,527 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T11:24:31,527 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T11:24:31,527 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:24:31,527 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T11:24:31,527 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T11:24:31,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:24:31,530 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:24:31,530 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:31,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:24:31,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:24:31,531 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:24:31,531 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:31,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:24:31,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:24:31,532 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:24:31,532 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:31,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:24:31,532 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:24:31,533 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:24:31,533 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:31,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:24:31,533 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:24:31,534 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740 2024-12-09T11:24:31,535 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740 2024-12-09T11:24:31,536 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:24:31,536 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:24:31,537 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T11:24:31,538 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:24:31,539 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=853219, jitterRate=0.0849248617887497}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:24:31,539 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T11:24:31,540 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733743471527Writing region info on filesystem at 1733743471527Initializing all the Stores at 1733743471528 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743471528Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743471528Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743471528Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743471528Cleaning up temporary data from old regions at 1733743471536 (+8 ms)Running coprocessor post-open hooks at 1733743471539 (+3 ms)Region opened successfully at 1733743471539 2024-12-09T11:24:31,541 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733743471504 2024-12-09T11:24:31,544 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T11:24:31,544 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T11:24:31,544 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=2dff3a36d44f,34951,1733743470706 2024-12-09T11:24:31,546 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,34951,1733743470706, state=OPEN 2024-12-09T11:24:31,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:24:31,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:24:31,554 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2dff3a36d44f,34951,1733743470706 2024-12-09T11:24:31,554 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:24:31,554 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:24:31,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T11:24:31,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,34951,1733743470706 in 203 msec 2024-12-09T11:24:31,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T11:24:31,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 616 msec 2024-12-09T11:24:31,562 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:24:31,562 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T11:24:31,564 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:24:31,564 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,34951,1733743470706, seqNum=-1] 2024-12-09T11:24:31,565 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:24:31,566 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44707, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:24:31,579 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 673 msec 2024-12-09T11:24:31,579 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733743471579, completionTime=-1 2024-12-09T11:24:31,579 INFO 
[master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T11:24:31,579 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T11:24:31,581 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T11:24:31,581 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733743531581 2024-12-09T11:24:31,581 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733743591581 2024-12-09T11:24:31,581 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-09T11:24:31,581 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42711,1733743470660-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:31,581 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42711,1733743470660-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:31,581 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42711,1733743470660-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:31,582 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2dff3a36d44f:42711, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:31,582 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:31,582 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:31,583 DEBUG [master/2dff3a36d44f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T11:24:31,586 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.841sec 2024-12-09T11:24:31,586 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T11:24:31,586 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T11:24:31,586 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T11:24:31,586 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-09T11:24:31,586 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T11:24:31,586 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42711,1733743470660-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:24:31,586 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42711,1733743470660-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T11:24:31,590 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T11:24:31,591 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T11:24:31,591 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,42711,1733743470660-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:24:31,620 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b5d5bef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:24:31,620 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2dff3a36d44f,42711,-1 for getting cluster id 2024-12-09T11:24:31,620 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:24:31,623 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c1d0c6eb-0c59-4e2d-8a4d-6e64727cfd29' 2024-12-09T11:24:31,623 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:24:31,623 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c1d0c6eb-0c59-4e2d-8a4d-6e64727cfd29" 2024-12-09T11:24:31,624 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79c565cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:24:31,624 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2dff3a36d44f,42711,-1] 2024-12-09T11:24:31,624 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:24:31,624 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:24:31,626 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56458, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:24:31,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1386dcba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:24:31,628 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:24:31,629 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,34951,1733743470706, seqNum=-1] 2024-12-09T11:24:31,629 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:24:31,631 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43770, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:24:31,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2dff3a36d44f,42711,1733743470660 2024-12-09T11:24:31,633 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:24:31,636 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T11:24:31,636 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T11:24:31,637 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 2dff3a36d44f,42711,1733743470660 2024-12-09T11:24:31,638 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@45b960b5 2024-12-09T11:24:31,638 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T11:24:31,639 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56474, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T11:24:31,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T11:24:31,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-09T11:24:31,640 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:24:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T11:24:31,643 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:24:31,643 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:31,643 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-09T11:24:31,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:24:31,644 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:24:31,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741835_1011 (size=405) 2024-12-09T11:24:31,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741835_1011 (size=405) 2024-12-09T11:24:31,659 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => deeb07bfe0d2acce22d33c896b406e8f, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8 2024-12-09T11:24:31,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741836_1012 (size=88) 2024-12-09T11:24:31,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36551 is added to blk_1073741836_1012 (size=88) 2024-12-09T11:24:31,669 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:24:31,669 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing deeb07bfe0d2acce22d33c896b406e8f, disabling compactions & flushes 2024-12-09T11:24:31,669 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 2024-12-09T11:24:31,669 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 2024-12-09T11:24:31,669 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. after waiting 0 ms 2024-12-09T11:24:31,669 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 2024-12-09T11:24:31,669 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 2024-12-09T11:24:31,669 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for deeb07bfe0d2acce22d33c896b406e8f: Waiting for close lock at 1733743471669Disabling compacts and flushes for region at 1733743471669Disabling writes for close at 1733743471669Writing region close event to WAL at 1733743471669Closed at 1733743471669 2024-12-09T11:24:31,670 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:24:31,671 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733743471670"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733743471670"}]},"ts":"1733743471670"} 2024-12-09T11:24:31,673 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T11:24:31,674 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:24:31,674 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733743471674"}]},"ts":"1733743471674"} 2024-12-09T11:24:31,678 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-09T11:24:31,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=deeb07bfe0d2acce22d33c896b406e8f, ASSIGN}] 2024-12-09T11:24:31,680 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=deeb07bfe0d2acce22d33c896b406e8f, ASSIGN 2024-12-09T11:24:31,681 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=deeb07bfe0d2acce22d33c896b406e8f, ASSIGN; state=OFFLINE, location=2dff3a36d44f,34951,1733743470706; forceNewPlan=false, retain=false 2024-12-09T11:24:31,832 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=deeb07bfe0d2acce22d33c896b406e8f, regionState=OPENING, regionLocation=2dff3a36d44f,34951,1733743470706 2024-12-09T11:24:31,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=deeb07bfe0d2acce22d33c896b406e8f, ASSIGN because future has completed 2024-12-09T11:24:31,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure deeb07bfe0d2acce22d33c896b406e8f, server=2dff3a36d44f,34951,1733743470706}] 2024-12-09T11:24:31,993 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 
2024-12-09T11:24:31,993 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => deeb07bfe0d2acce22d33c896b406e8f, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:24:31,994 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:24:31,994 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:24:31,994 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:24:31,994 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:24:32,002 INFO [StoreOpener-deeb07bfe0d2acce22d33c896b406e8f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:24:32,012 INFO [StoreOpener-deeb07bfe0d2acce22d33c896b406e8f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region deeb07bfe0d2acce22d33c896b406e8f columnFamilyName info 2024-12-09T11:24:32,012 DEBUG [StoreOpener-deeb07bfe0d2acce22d33c896b406e8f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:24:32,014 INFO [StoreOpener-deeb07bfe0d2acce22d33c896b406e8f-1 {}] regionserver.HStore(327): Store=deeb07bfe0d2acce22d33c896b406e8f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:24:32,014 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:24:32,015 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:24:32,019 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:24:32,019 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:24:32,019 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:24:32,021 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:24:32,025 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:24:32,025 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened deeb07bfe0d2acce22d33c896b406e8f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=702911, jitterRate=-0.10620345175266266}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:24:32,025 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:24:32,026 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for deeb07bfe0d2acce22d33c896b406e8f: Running coprocessor pre-open hook at 1733743471994Writing region info on filesystem at 1733743471994Initializing all the Stores at 1733743471995 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743471995Cleaning up temporary data from old regions at 1733743472019 (+24 ms)Running coprocessor post-open hooks at 1733743472025 (+6 ms)Region opened successfully at 1733743472026 (+1 ms) 2024-12-09T11:24:32,028 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f., pid=6, masterSystemTime=1733743471988 2024-12-09T11:24:32,032 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 2024-12-09T11:24:32,032 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 2024-12-09T11:24:32,034 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=deeb07bfe0d2acce22d33c896b406e8f, regionState=OPEN, openSeqNum=2, regionLocation=2dff3a36d44f,34951,1733743470706 2024-12-09T11:24:32,040 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure deeb07bfe0d2acce22d33c896b406e8f, server=2dff3a36d44f,34951,1733743470706 because future has completed 2024-12-09T11:24:32,062 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T11:24:32,063 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure deeb07bfe0d2acce22d33c896b406e8f, server=2dff3a36d44f,34951,1733743470706 in 216 msec 2024-12-09T11:24:32,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T11:24:32,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=deeb07bfe0d2acce22d33c896b406e8f, ASSIGN in 385 msec 2024-12-09T11:24:32,073 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:24:32,074 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733743472073"}]},"ts":"1733743472073"} 2024-12-09T11:24:32,077 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-09T11:24:32,078 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:24:32,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 438 msec 2024-12-09T11:24:32,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:32,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:33,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:33,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:34,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:34,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:35,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:35,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:36,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T11:24:36,171 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T11:24:36,172 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:24:36,172 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T11:24:36,172 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T11:24:36,172 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T11:24:36,172 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T11:24:36,172 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T11:24:36,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:36,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:37,029 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T11:24:37,030 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,030 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,030 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,030 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,055 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:24:37,059 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T11:24:37,059 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-09T11:24:37,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:37,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:38,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:38,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:39,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:39,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:40,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:40,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:41,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:41,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:41,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:24:41,686 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T11:24:41,686 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-12-09T11:24:41,689 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T11:24:41,689 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 2024-12-09T11:24:41,692 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f., hostname=2dff3a36d44f,34951,1733743470706, seqNum=2] 2024-12-09T11:24:41,699 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T11:24:41,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T11:24:41,705 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T11:24:41,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T11:24:41,706 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T11:24:41,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T11:24:41,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34951 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T11:24:41,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 
2024-12-09T11:24:41,867 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing deeb07bfe0d2acce22d33c896b406e8f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T11:24:41,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/608991846f914c95817272f8dec734a4 is 1080, key is row0001/info:/1733743481693/Put/seqid=0 2024-12-09T11:24:41,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741837_1013 (size=6033) 2024-12-09T11:24:41,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741837_1013 (size=6033) 2024-12-09T11:24:41,890 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/608991846f914c95817272f8dec734a4 2024-12-09T11:24:41,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/608991846f914c95817272f8dec734a4 as hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/608991846f914c95817272f8dec734a4 2024-12-09T11:24:41,903 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/608991846f914c95817272f8dec734a4, entries=1, sequenceid=5, filesize=5.9 K 2024-12-09T11:24:41,904 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for deeb07bfe0d2acce22d33c896b406e8f in 37ms, sequenceid=5, compaction requested=false 2024-12-09T11:24:41,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for deeb07bfe0d2acce22d33c896b406e8f: 2024-12-09T11:24:41,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 
2024-12-09T11:24:41,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T11:24:41,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T11:24:41,911 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T11:24:41,911 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 202 msec 2024-12-09T11:24:41,913 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 212 msec 2024-12-09T11:24:42,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:42,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:43,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:43,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:44,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:44,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:45,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:45,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:46,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:46,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:47,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:47,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:48,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:48,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:49,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:49,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:50,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:50,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:51,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:51,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:51,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T11:24:51,735 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T11:24:51,739 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T11:24:51,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T11:24:51,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-09T11:24:51,741 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T11:24:51,742 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T11:24:51,742 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T11:24:51,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34951 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-12-09T11:24:51,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 
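[Editor's note] The retry loop above is RecoverLeaseFSUtils repeatedly probing whether the old WAL file is closed; the probe goes through reflection, which is why the underlying java.io.IOException: Filesystem closed surfaces as an InvocationTargetException in each WARN. A minimal sketch of that probe pattern follows; the class and method names below are illustrative assumptions, not the HBase source.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of a reflective isFileClosed probe, similar in spirit to what
// the stack traces above show: DistributedFileSystem.isFileClosed is looked up and
// invoked via reflection, so when the mini-cluster's DFS client has already been
// shut down, the "Filesystem closed" IOException comes back wrapped in an
// InvocationTargetException.
final class IsFileClosedProbe {
  static boolean isFileClosed(FileSystem fs, Path wal) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, wal);
    } catch (InvocationTargetException e) {
      // e.getCause() is the real failure, e.g. java.io.IOException: Filesystem closed.
      return false;
    } catch (ReflectiveOperationException e) {
      // The FileSystem implementation does not expose isFileClosed at all.
      return false;
    }
  }
}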
2024-12-09T11:24:51,896 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing deeb07bfe0d2acce22d33c896b406e8f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T11:24:51,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/a21be0b2a05d4e9b944da69c449a496e is 1080, key is row0002/info:/1733743491737/Put/seqid=0 2024-12-09T11:24:51,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741838_1014 (size=6033) 2024-12-09T11:24:51,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741838_1014 (size=6033) 2024-12-09T11:24:51,907 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/a21be0b2a05d4e9b944da69c449a496e 2024-12-09T11:24:51,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/a21be0b2a05d4e9b944da69c449a496e as hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/a21be0b2a05d4e9b944da69c449a496e 2024-12-09T11:24:51,928 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/a21be0b2a05d4e9b944da69c449a496e, entries=1, sequenceid=9, filesize=5.9 K 2024-12-09T11:24:51,930 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for deeb07bfe0d2acce22d33c896b406e8f in 33ms, sequenceid=9, compaction requested=false 2024-12-09T11:24:51,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for deeb07bfe0d2acce22d33c896b406e8f: 2024-12-09T11:24:51,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 
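[Editor's note] The flush sequence above (FlushTableProcedure pid=9 with a FlushRegionProcedure subprocedure pid=10, a memstore flush to a .tmp HFile, then the commit into the info store) is driven by a client-side flush request, as logged at "Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling". A hedged sketch of how such a request is typically issued with the standard HBase client API; the surrounding setup is an assumption for illustration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative client-side flush request. The master turns this into a
// FlushTableProcedure with one FlushRegionProcedure per region, and the client
// polls until the procedure is done (the "Checking to see if procedure is done
// pid=9" DEBUG lines above).
public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}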
2024-12-09T11:24:51,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-09T11:24:51,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-09T11:24:51,935 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T11:24:51,935 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec 2024-12-09T11:24:51,938 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 197 msec 2024-12-09T11:24:52,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:52,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:53,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:53,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:54,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:54,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:55,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:55,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:55,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta after 68038ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T11:24:55,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 after 68064ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
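Note: the two "attempt=2 ... after 68038ms / 68064ms" WARN entries above come from HBase's WAL close path. After a writer fails to close cleanly, RecoverLeaseFSUtils keeps invoking DistributedFileSystem.recoverLease() and isFileClosed() (via reflection) until the NameNode reports the WAL file closed, logging each attempt; here every call fails with "Filesystem closed" because the mini-cluster's DFSClient has already been shut down. The sketch below is only a rough illustration of that retry pattern, not the actual RecoverLeaseFSUtils implementation; the class name, method name, timeout values and log wording are assumptions made for illustration.

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/**
 * Hypothetical sketch of a lease-recovery retry loop similar to what produces
 * the "attempt=N on file=... after Nms" WARN lines above. Not the real
 * RecoverLeaseFSUtils code; names and timeouts are illustrative only.
 */
public final class LeaseRecoverySketch {

  /** Retry recoverLease/isFileClosed until HDFS reports the file closed or we time out. */
  static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path walFile,
      long timeoutMs, long pauseMs) throws InterruptedException {
    long start = System.currentTimeMillis();
    int attempt = 0;
    while (System.currentTimeMillis() - start < timeoutMs) {
      attempt++;
      try {
        // Ask the NameNode to recover the lease; true means the file is already closed.
        if (dfs.recoverLease(walFile)) {
          return true;
        }
        // Lease recovery was started; poll until the last block is finalized.
        if (dfs.isFileClosed(walFile)) {
          return true;
        }
      } catch (IOException e) {
        // With a closed DFSClient every call throws "Filesystem closed",
        // which is exactly what the repeated WARN entries in this log show.
        System.err.printf("attempt=%d on file=%s failed after %dms: %s%n",
            attempt, walFile, System.currentTimeMillis() - start, e.getMessage());
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }

  private LeaseRecoverySketch() {
  }
}
```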
2024-12-09T11:24:56,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:56,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:57,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:57,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:58,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:58,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:24:59,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:24:59,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:00,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:00,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:00,645 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:25:01,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:01,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-09T11:25:01,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-09T11:25:01,826 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T11:25:01,829 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C34951%2C1733743470706.1733743501829
2024-12-09T11:25:01,844 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:25:01,844 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:25:01,844 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:25:01,844 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:25:01,844 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:25:01,845 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706/2dff3a36d44f%2C34951%2C1733743470706.1733743471103 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706/2dff3a36d44f%2C34951%2C1733743470706.1733743501829
2024-12-09T11:25:01,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741833_1009 (size=5546)
2024-12-09T11:25:01,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741833_1009 (size=5546)
2024-12-09T11:25:01,851 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34049:34049),(127.0.0.1/127.0.0.1:44369:44369)]
2024-12-09T11:25:01,851 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706/2dff3a36d44f%2C34951%2C1733743470706.1733743471103 is not closed yet, will try archiving it next time
2024-12-09T11:25:01,852 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T11:25:01,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T11:25:01,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-09T11:25:01,854 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T11:25:01,855 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T11:25:01,855 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T11:25:02,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34951 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-12-09T11:25:02,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.
2024-12-09T11:25:02,010 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing deeb07bfe0d2acce22d33c896b406e8f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T11:25:02,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/06858413966b44b5a56b926d67e51c81 is 1080, key is row0003/info:/1733743501827/Put/seqid=0
2024-12-09T11:25:02,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741840_1016 (size=6033)
2024-12-09T11:25:02,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741840_1016 (size=6033)
2024-12-09T11:25:02,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:02,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-09T11:25:02,421 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/06858413966b44b5a56b926d67e51c81
2024-12-09T11:25:02,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/06858413966b44b5a56b926d67e51c81 as hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/06858413966b44b5a56b926d67e51c81
2024-12-09T11:25:02,433 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/06858413966b44b5a56b926d67e51c81, entries=1, sequenceid=13, filesize=5.9 K
2024-12-09T11:25:02,434 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for deeb07bfe0d2acce22d33c896b406e8f in 424ms, sequenceid=13, compaction requested=true
2024-12-09T11:25:02,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for deeb07bfe0d2acce22d33c896b406e8f:
2024-12-09T11:25:02,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.
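Note: the flush recorded above was requested by the test client (the "Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling" entry) and executed as FlushTableProcedure pid=11 with sub-procedure pid=12 on the region server. For orientation only, a client triggers this kind of flush through the HBase Admin API roughly as in the hypothetical sketch below; the table name is taken from the log, while the connection setup and class name are assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/**
 * Minimal, hypothetical sketch of a client-side flush request like the one
 * this log records (HMaster flush -> FlushTableProcedure -> region flush).
 * Connection settings are assumed to come from hbase-site.xml on the classpath.
 */
public final class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Synchronous flush: returns after the flush procedure completes, i.e.
      // once the memstore has been written out as a new HFile (compare the
      // "Finished flush of dataSize ~1.05 KB ... sequenceid=13" entry above).
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}
```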
2024-12-09T11:25:02,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-12-09T11:25:02,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-12-09T11:25:02,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-12-09T11:25:02,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 581 msec
2024-12-09T11:25:02,442 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 588 msec
2024-12-09T11:25:03,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-12-09T11:25:03,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:04,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:04,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:05,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:05,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:06,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:06,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:07,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:07,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:08,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:08,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:09,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:09,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:10,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:10,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:11,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:11,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:11,596 INFO [master/2dff3a36d44f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T11:25:11,596 INFO [master/2dff3a36d44f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T11:25:11,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-09T11:25:11,876 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T11:25:11,876 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T11:25:11,877 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T11:25:11,877 DEBUG [Time-limited test {}] regionserver.HStore(1541): deeb07bfe0d2acce22d33c896b406e8f/info is initiating minor compaction (all files) 2024-12-09T11:25:11,878 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:25:11,878 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:11,878 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of deeb07bfe0d2acce22d33c896b406e8f/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 
2024-12-09T11:25:11,878 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/608991846f914c95817272f8dec734a4, hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/a21be0b2a05d4e9b944da69c449a496e, hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/06858413966b44b5a56b926d67e51c81] into tmpdir=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp, totalSize=17.7 K 2024-12-09T11:25:11,878 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 608991846f914c95817272f8dec734a4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733743481693 2024-12-09T11:25:11,879 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting a21be0b2a05d4e9b944da69c449a496e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733743491737 2024-12-09T11:25:11,879 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 06858413966b44b5a56b926d67e51c81, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733743501827 2024-12-09T11:25:11,891 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): deeb07bfe0d2acce22d33c896b406e8f#info#compaction#45 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:25:11,891 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/40a471e575af472ab1cccf488cd87547 is 1080, key is row0001/info:/1733743481693/Put/seqid=0 2024-12-09T11:25:11,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741841_1017 (size=8296) 2024-12-09T11:25:11,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741841_1017 (size=8296) 2024-12-09T11:25:11,902 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/40a471e575af472ab1cccf488cd87547 as hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/40a471e575af472ab1cccf488cd87547 2024-12-09T11:25:11,908 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in deeb07bfe0d2acce22d33c896b406e8f/info of deeb07bfe0d2acce22d33c896b406e8f into 40a471e575af472ab1cccf488cd87547(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T11:25:11,908 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for deeb07bfe0d2acce22d33c896b406e8f: 2024-12-09T11:25:11,911 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C34951%2C1733743470706.1733743511911 2024-12-09T11:25:11,916 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:11,916 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:11,916 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:11,916 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:11,916 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:11,916 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706/2dff3a36d44f%2C34951%2C1733743470706.1733743501829 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706/2dff3a36d44f%2C34951%2C1733743470706.1733743511911 2024-12-09T11:25:11,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741839_1015 (size=2520) 2024-12-09T11:25:11,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741839_1015 (size=2520) 2024-12-09T11:25:11,923 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706/2dff3a36d44f%2C34951%2C1733743470706.1733743471103 to 
hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/oldWALs/2dff3a36d44f%2C34951%2C1733743470706.1733743471103 2024-12-09T11:25:11,923 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34049:34049),(127.0.0.1/127.0.0.1:44369:44369)] 2024-12-09T11:25:11,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T11:25:11,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T11:25:11,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-09T11:25:11,927 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T11:25:11,928 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T11:25:11,928 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T11:25:12,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34951 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-12-09T11:25:12,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 
2024-12-09T11:25:12,082 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing deeb07bfe0d2acce22d33c896b406e8f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T11:25:12,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/c3826050946641619c349884a26f7e7b is 1080, key is row0000/info:/1733743511909/Put/seqid=0 2024-12-09T11:25:12,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741843_1019 (size=6033) 2024-12-09T11:25:12,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741843_1019 (size=6033) 2024-12-09T11:25:12,091 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/c3826050946641619c349884a26f7e7b 2024-12-09T11:25:12,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/c3826050946641619c349884a26f7e7b as hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/c3826050946641619c349884a26f7e7b 2024-12-09T11:25:12,106 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/c3826050946641619c349884a26f7e7b, entries=1, sequenceid=18, filesize=5.9 K 2024-12-09T11:25:12,107 INFO [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for deeb07bfe0d2acce22d33c896b406e8f in 26ms, sequenceid=18, compaction requested=false 2024-12-09T11:25:12,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for deeb07bfe0d2acce22d33c896b406e8f: 2024-12-09T11:25:12,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 
2024-12-09T11:25:12,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-09T11:25:12,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-09T11:25:12,112 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-09T11:25:12,112 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-12-09T11:25:12,115 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec 2024-12-09T11:25:12,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:12,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:13,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:13,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:14,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:14,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:15,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:15,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:16,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:16,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:16,994 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region deeb07bfe0d2acce22d33c896b406e8f, had cached 0 bytes from a total of 14329 2024-12-09T11:25:17,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:17,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:18,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:18,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:19,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:19,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:20,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:20,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:21,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:21,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:21,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42711 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-09T11:25:21,966 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T11:25:21,969 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C34951%2C1733743470706.1733743521969 2024-12-09T11:25:21,974 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:21,974 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:21,975 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:21,975 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:21,975 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:21,975 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706/2dff3a36d44f%2C34951%2C1733743470706.1733743511911 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706/2dff3a36d44f%2C34951%2C1733743470706.1733743521969 2024-12-09T11:25:21,976 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34049:34049),(127.0.0.1/127.0.0.1:44369:44369)] 2024-12-09T11:25:21,976 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706/2dff3a36d44f%2C34951%2C1733743470706.1733743511911 is not closed yet, will try archiving it next time 2024-12-09T11:25:21,976 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/WALs/2dff3a36d44f,34951,1733743470706/2dff3a36d44f%2C34951%2C1733743470706.1733743501829 to hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/oldWALs/2dff3a36d44f%2C34951%2C1733743470706.1733743501829 2024-12-09T11:25:21,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T11:25:21,976 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T11:25:21,976 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:25:21,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:25:21,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:25:21,976 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T11:25:21,977 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1378781048, stopped=false 2024-12-09T11:25:21,977 
INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2dff3a36d44f,42711,1733743470660 2024-12-09T11:25:21,977 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:25:21,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741842_1018 (size=2026) 2024-12-09T11:25:21,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741842_1018 (size=2026) 2024-12-09T11:25:21,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:25:21,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:21,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:25:21,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:21,983 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:25:21,983 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T11:25:21,983 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:25:21,983 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:25:21,983 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '2dff3a36d44f,34951,1733743470706' ***** 2024-12-09T11:25:21,983 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:25:21,983 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:25:21,983 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:25:21,984 INFO [RS:0;2dff3a36d44f:34951 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:25:21,984 INFO [RS:0;2dff3a36d44f:34951 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:25:21,984 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(3091): Received CLOSE for deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:25:21,984 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:25:21,984 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(959): stopping server 2dff3a36d44f,34951,1733743470706 2024-12-09T11:25:21,984 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:25:21,984 INFO [RS:0;2dff3a36d44f:34951 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2dff3a36d44f:34951. 2024-12-09T11:25:21,984 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:25:21,984 DEBUG [RS:0;2dff3a36d44f:34951 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:25:21,984 DEBUG [RS:0;2dff3a36d44f:34951 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:25:21,984 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:25:21,984 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-09T11:25:21,984 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T11:25:21,984 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T11:25:21,985 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing deeb07bfe0d2acce22d33c896b406e8f, disabling compactions & flushes 2024-12-09T11:25:21,985 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 2024-12-09T11:25:21,985 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 2024-12-09T11:25:21,985 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. after waiting 0 ms 2024-12-09T11:25:21,985 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 2024-12-09T11:25:21,985 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing deeb07bfe0d2acce22d33c896b406e8f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T11:25:21,985 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T11:25:21,985 DEBUG [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, deeb07bfe0d2acce22d33c896b406e8f=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.} 2024-12-09T11:25:21,985 DEBUG [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, deeb07bfe0d2acce22d33c896b406e8f 2024-12-09T11:25:21,986 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:25:21,986 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:25:21,986 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:25:21,986 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:25:21,986 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:25:21,986 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-09T11:25:21,989 DEBUG 
[RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/a47cd0d2827d45608031c403abe01f5f is 1080, key is row0001/info:/1733743521967/Put/seqid=0 2024-12-09T11:25:21,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741845_1021 (size=6033) 2024-12-09T11:25:21,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741845_1021 (size=6033) 2024-12-09T11:25:21,998 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/a47cd0d2827d45608031c403abe01f5f 2024-12-09T11:25:22,004 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/.tmp/info/ac062ca35f4c45af9c3471f7d5160c9e is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f./info:regioninfo/1733743472033/Put/seqid=0 2024-12-09T11:25:22,005 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/.tmp/info/a47cd0d2827d45608031c403abe01f5f as hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/a47cd0d2827d45608031c403abe01f5f 2024-12-09T11:25:22,010 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/a47cd0d2827d45608031c403abe01f5f, entries=1, sequenceid=22, filesize=5.9 K 2024-12-09T11:25:22,011 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for deeb07bfe0d2acce22d33c896b406e8f in 26ms, sequenceid=22, compaction requested=true 2024-12-09T11:25:22,015 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/608991846f914c95817272f8dec734a4, 
hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/a21be0b2a05d4e9b944da69c449a496e, hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/06858413966b44b5a56b926d67e51c81] to archive 2024-12-09T11:25:22,016 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T11:25:22,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741846_1022 (size=7308) 2024-12-09T11:25:22,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741846_1022 (size=7308) 2024-12-09T11:25:22,017 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/.tmp/info/ac062ca35f4c45af9c3471f7d5160c9e 2024-12-09T11:25:22,018 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/608991846f914c95817272f8dec734a4 to hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/608991846f914c95817272f8dec734a4 2024-12-09T11:25:22,019 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/a21be0b2a05d4e9b944da69c449a496e to hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/a21be0b2a05d4e9b944da69c449a496e 2024-12-09T11:25:22,021 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/06858413966b44b5a56b926d67e51c81 to hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/info/06858413966b44b5a56b926d67e51c81 2024-12-09T11:25:22,021 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=2dff3a36d44f:42711 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-09T11:25:22,021 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [608991846f914c95817272f8dec734a4=6033, a21be0b2a05d4e9b944da69c449a496e=6033, 06858413966b44b5a56b926d67e51c81=6033] 2024-12-09T11:25:22,024 INFO [regionserver/2dff3a36d44f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T11:25:22,024 INFO [regionserver/2dff3a36d44f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T11:25:22,029 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/deeb07bfe0d2acce22d33c896b406e8f/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-09T11:25:22,030 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 2024-12-09T11:25:22,030 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for deeb07bfe0d2acce22d33c896b406e8f: Waiting for close lock at 1733743521985Running coprocessor pre-close hooks at 1733743521985Disabling compacts and flushes for region at 1733743521985Disabling writes for close at 1733743521985Obtaining lock to block concurrent updates at 1733743521985Preparing flush snapshotting stores in deeb07bfe0d2acce22d33c896b406e8f at 1733743521985Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733743521985Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. at 1733743521986 (+1 ms)Flushing deeb07bfe0d2acce22d33c896b406e8f/info: creating writer at 1733743521986Flushing deeb07bfe0d2acce22d33c896b406e8f/info: appending metadata at 1733743521988 (+2 ms)Flushing deeb07bfe0d2acce22d33c896b406e8f/info: closing flushed file at 1733743521988Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c86bbde: reopening flushed file at 1733743522004 (+16 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for deeb07bfe0d2acce22d33c896b406e8f in 26ms, sequenceid=22, compaction requested=true at 1733743522011 (+7 ms)Writing region close event to WAL at 1733743522022 (+11 ms)Running coprocessor post-close hooks at 1733743522030 (+8 ms)Closed at 1733743522030 2024-12-09T11:25:22,030 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733743471640.deeb07bfe0d2acce22d33c896b406e8f. 
2024-12-09T11:25:22,037 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/.tmp/ns/53b7a895282a43fca5a8de7c99fdb84f is 43, key is default/ns:d/1733743471567/Put/seqid=0 2024-12-09T11:25:22,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741847_1023 (size=5153) 2024-12-09T11:25:22,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741847_1023 (size=5153) 2024-12-09T11:25:22,186 DEBUG [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T11:25:22,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:22,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:22,386 DEBUG [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T11:25:22,443 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/.tmp/ns/53b7a895282a43fca5a8de7c99fdb84f 2024-12-09T11:25:22,466 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/.tmp/table/6a515d3474b7434abd37e06d3c837953 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733743472073/Put/seqid=0 2024-12-09T11:25:22,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741848_1024 (size=5508) 2024-12-09T11:25:22,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741848_1024 (size=5508) 2024-12-09T11:25:22,477 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/.tmp/table/6a515d3474b7434abd37e06d3c837953 2024-12-09T11:25:22,483 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/.tmp/info/ac062ca35f4c45af9c3471f7d5160c9e as hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/info/ac062ca35f4c45af9c3471f7d5160c9e 2024-12-09T11:25:22,488 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/info/ac062ca35f4c45af9c3471f7d5160c9e, entries=10, sequenceid=11, filesize=7.1 K 2024-12-09T11:25:22,489 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/.tmp/ns/53b7a895282a43fca5a8de7c99fdb84f as hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/ns/53b7a895282a43fca5a8de7c99fdb84f 2024-12-09T11:25:22,494 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/ns/53b7a895282a43fca5a8de7c99fdb84f, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T11:25:22,495 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/.tmp/table/6a515d3474b7434abd37e06d3c837953 as 
hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/table/6a515d3474b7434abd37e06d3c837953 2024-12-09T11:25:22,500 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/table/6a515d3474b7434abd37e06d3c837953, entries=2, sequenceid=11, filesize=5.4 K 2024-12-09T11:25:22,501 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 515ms, sequenceid=11, compaction requested=false 2024-12-09T11:25:22,511 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T11:25:22,511 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:25:22,511 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:25:22,511 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743521985Running coprocessor pre-close hooks at 1733743521985Disabling compacts and flushes for region at 1733743521985Disabling writes for close at 1733743521986 (+1 ms)Obtaining lock to block concurrent updates at 1733743521986Preparing flush snapshotting stores in 1588230740 at 1733743521986Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733743521986Flushing stores of hbase:meta,,1.1588230740 at 1733743521988 (+2 ms)Flushing 1588230740/info: creating writer at 1733743521988Flushing 1588230740/info: appending metadata at 1733743522004 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733743522004Flushing 1588230740/ns: creating writer at 1733743522022 (+18 ms)Flushing 1588230740/ns: appending metadata at 1733743522037 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733743522037Flushing 1588230740/table: creating writer at 1733743522449 (+412 ms)Flushing 1588230740/table: appending metadata at 1733743522466 (+17 ms)Flushing 1588230740/table: closing flushed file at 1733743522466Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48d23636: reopening flushed file at 1733743522483 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10eb51ba: reopening flushed file at 1733743522489 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6fe62e71: reopening flushed file at 1733743522494 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 515ms, sequenceid=11, compaction requested=false at 1733743522501 (+7 ms)Writing region close event to WAL at 1733743522507 (+6 ms)Running coprocessor post-close hooks at 1733743522511 (+4 ms)Closed at 1733743522511 2024-12-09T11:25:22,512 DEBUG 
[RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T11:25:22,586 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(976): stopping server 2dff3a36d44f,34951,1733743470706; all regions closed. 2024-12-09T11:25:22,586 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,586 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,587 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,587 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,587 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741834_1010 (size=3306) 2024-12-09T11:25:22,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741834_1010 (size=3306) 2024-12-09T11:25:22,591 DEBUG [RS:0;2dff3a36d44f:34951 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/oldWALs 2024-12-09T11:25:22,591 INFO [RS:0;2dff3a36d44f:34951 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C34951%2C1733743470706.meta:.meta(num 1733743471512) 2024-12-09T11:25:22,592 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,592 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,592 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,592 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,592 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741844_1020 (size=1252) 2024-12-09T11:25:22,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741844_1020 (size=1252) 2024-12-09T11:25:22,597 DEBUG [RS:0;2dff3a36d44f:34951 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/oldWALs 2024-12-09T11:25:22,597 INFO [RS:0;2dff3a36d44f:34951 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C34951%2C1733743470706:(num 1733743521969) 2024-12-09T11:25:22,597 DEBUG [RS:0;2dff3a36d44f:34951 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:25:22,597 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:25:22,597 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:25:22,597 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.ChoreService(370): Chore service for: regionserver/2dff3a36d44f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T11:25:22,597 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:25:22,597 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T11:25:22,597 INFO [RS:0;2dff3a36d44f:34951 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34951 2024-12-09T11:25:22,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2dff3a36d44f,34951,1733743470706 2024-12-09T11:25:22,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:25:22,600 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:25:22,600 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2dff3a36d44f,34951,1733743470706] 2024-12-09T11:25:22,604 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2dff3a36d44f,34951,1733743470706 already deleted, retry=false 2024-12-09T11:25:22,604 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2dff3a36d44f,34951,1733743470706 expired; onlineServers=0 2024-12-09T11:25:22,604 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2dff3a36d44f,42711,1733743470660' ***** 2024-12-09T11:25:22,604 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T11:25:22,604 INFO [M:0;2dff3a36d44f:42711 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:25:22,604 INFO [M:0;2dff3a36d44f:42711 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:25:22,604 DEBUG [M:0;2dff3a36d44f:42711 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T11:25:22,604 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T11:25:22,604 DEBUG [M:0;2dff3a36d44f:42711 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T11:25:22,604 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743470910 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743470910,5,FailOnTimeoutGroup] 2024-12-09T11:25:22,604 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743470911 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743470911,5,FailOnTimeoutGroup] 2024-12-09T11:25:22,605 INFO [M:0;2dff3a36d44f:42711 {}] hbase.ChoreService(370): Chore service for: master/2dff3a36d44f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T11:25:22,605 INFO [M:0;2dff3a36d44f:42711 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:25:22,605 DEBUG [M:0;2dff3a36d44f:42711 {}] master.HMaster(1795): Stopping service threads 2024-12-09T11:25:22,605 INFO [M:0;2dff3a36d44f:42711 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T11:25:22,605 INFO [M:0;2dff3a36d44f:42711 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:25:22,605 INFO [M:0;2dff3a36d44f:42711 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T11:25:22,605 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T11:25:22,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T11:25:22,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:22,607 DEBUG [M:0;2dff3a36d44f:42711 {}] zookeeper.ZKUtil(347): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T11:25:22,607 WARN [M:0;2dff3a36d44f:42711 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T11:25:22,607 INFO [M:0;2dff3a36d44f:42711 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/.lastflushedseqids 2024-12-09T11:25:22,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741849_1025 (size=130) 2024-12-09T11:25:22,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741849_1025 (size=130) 2024-12-09T11:25:22,612 INFO [M:0;2dff3a36d44f:42711 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T11:25:22,612 INFO [M:0;2dff3a36d44f:42711 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T11:25:22,612 DEBUG [M:0;2dff3a36d44f:42711 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:25:22,612 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:25:22,612 DEBUG [M:0;2dff3a36d44f:42711 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:25:22,612 DEBUG [M:0;2dff3a36d44f:42711 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:25:22,612 DEBUG [M:0;2dff3a36d44f:42711 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:25:22,613 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.89 KB 2024-12-09T11:25:22,632 DEBUG [M:0;2dff3a36d44f:42711 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/31ff9f29aed5463fb7ae31d26eef6113 is 82, key is hbase:meta,,1/info:regioninfo/1733743471544/Put/seqid=0 2024-12-09T11:25:22,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741850_1026 (size=5672) 2024-12-09T11:25:22,637 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/31ff9f29aed5463fb7ae31d26eef6113 2024-12-09T11:25:22,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741850_1026 (size=5672) 2024-12-09T11:25:22,666 DEBUG [M:0;2dff3a36d44f:42711 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8dfec8d5167842f4af3766f0f7519ded is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733743472080/Put/seqid=0 2024-12-09T11:25:22,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741851_1027 (size=7817) 2024-12-09T11:25:22,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741851_1027 (size=7817) 2024-12-09T11:25:22,671 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.93 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8dfec8d5167842f4af3766f0f7519ded 2024-12-09T11:25:22,675 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8dfec8d5167842f4af3766f0f7519ded 2024-12-09T11:25:22,690 DEBUG [M:0;2dff3a36d44f:42711 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/31925891987f4a5bb691d7c0f572246e is 69, key is 2dff3a36d44f,34951,1733743470706/rs:state/1733743470948/Put/seqid=0 2024-12-09T11:25:22,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741852_1028 (size=5156) 2024-12-09T11:25:22,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741852_1028 (size=5156) 2024-12-09T11:25:22,696 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/31925891987f4a5bb691d7c0f572246e 2024-12-09T11:25:22,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:25:22,704 INFO [RS:0;2dff3a36d44f:34951 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:25:22,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34951-0x1012aed78570001, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:25:22,704 INFO [RS:0;2dff3a36d44f:34951 {}] regionserver.HRegionServer(1031): Exiting; stopping=2dff3a36d44f,34951,1733743470706; zookeeper connection closed. 2024-12-09T11:25:22,704 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3fd5f2ad {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3fd5f2ad 2024-12-09T11:25:22,704 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T11:25:22,714 DEBUG [M:0;2dff3a36d44f:42711 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a380eb92b9ca4dc1b7f79786fc3192fb is 52, key is load_balancer_on/state:d/1733743471635/Put/seqid=0 2024-12-09T11:25:22,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741853_1029 (size=5056) 2024-12-09T11:25:22,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741853_1029 (size=5056) 2024-12-09T11:25:22,719 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a380eb92b9ca4dc1b7f79786fc3192fb 2024-12-09T11:25:22,726 DEBUG [M:0;2dff3a36d44f:42711 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/31ff9f29aed5463fb7ae31d26eef6113 as 
hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/31ff9f29aed5463fb7ae31d26eef6113 2024-12-09T11:25:22,731 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/31ff9f29aed5463fb7ae31d26eef6113, entries=8, sequenceid=121, filesize=5.5 K 2024-12-09T11:25:22,732 DEBUG [M:0;2dff3a36d44f:42711 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8dfec8d5167842f4af3766f0f7519ded as hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8dfec8d5167842f4af3766f0f7519ded 2024-12-09T11:25:22,738 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8dfec8d5167842f4af3766f0f7519ded 2024-12-09T11:25:22,738 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8dfec8d5167842f4af3766f0f7519ded, entries=14, sequenceid=121, filesize=7.6 K 2024-12-09T11:25:22,739 DEBUG [M:0;2dff3a36d44f:42711 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/31925891987f4a5bb691d7c0f572246e as hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/31925891987f4a5bb691d7c0f572246e 2024-12-09T11:25:22,743 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/31925891987f4a5bb691d7c0f572246e, entries=1, sequenceid=121, filesize=5.0 K 2024-12-09T11:25:22,744 DEBUG [M:0;2dff3a36d44f:42711 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a380eb92b9ca4dc1b7f79786fc3192fb as hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a380eb92b9ca4dc1b7f79786fc3192fb 2024-12-09T11:25:22,750 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33389/user/jenkins/test-data/b4ca123d-51f5-920b-fb20-fe287a7c71a8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a380eb92b9ca4dc1b7f79786fc3192fb, entries=1, sequenceid=121, filesize=4.9 K 2024-12-09T11:25:22,751 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44581, heapSize ~54.83 KB/56144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=121, compaction requested=false 2024-12-09T11:25:22,755 INFO [M:0;2dff3a36d44f:42711 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T11:25:22,755 DEBUG [M:0;2dff3a36d44f:42711 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743522612Disabling compacts and flushes for region at 1733743522612Disabling writes for close at 1733743522612Obtaining lock to block concurrent updates at 1733743522613 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733743522613Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44581, getHeapSize=56144, getOffHeapSize=0, getCellsCount=140 at 1733743522613Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733743522614 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733743522614Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733743522632 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733743522632Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733743522642 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733743522665 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733743522665Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733743522675 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733743522690 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733743522690Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733743522700 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733743522713 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733743522713Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b397341: reopening flushed file at 1733743522725 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42f5fb63: reopening flushed file at 1733743522731 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30290540: reopening flushed file at 1733743522738 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7530dc0a: reopening flushed file at 1733743522743 (+5 ms)Finished flush of dataSize ~43.54 KB/44581, heapSize ~54.83 KB/56144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=121, compaction requested=false at 1733743522751 (+8 ms)Writing region close event to WAL at 1733743522755 (+4 ms)Closed at 1733743522755 2024-12-09T11:25:22,756 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,756 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,756 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,756 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,756 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:25:22,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43519 is added to blk_1073741830_1006 (size=52978) 2024-12-09T11:25:22,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36551 is added to blk_1073741830_1006 (size=52978) 2024-12-09T11:25:22,760 INFO [M:0;2dff3a36d44f:42711 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T11:25:22,760 INFO [M:0;2dff3a36d44f:42711 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42711 2024-12-09T11:25:22,760 INFO [M:0;2dff3a36d44f:42711 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:25:22,760 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T11:25:22,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:25:22,862 INFO [M:0;2dff3a36d44f:42711 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:25:22,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42711-0x1012aed78570000, quorum=127.0.0.1:52369, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:25:22,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@796941bb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:25:22,865 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a125e15{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:25:22,865 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:25:22,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5301759b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:25:22,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@351a06a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/hadoop.log.dir/,STOPPED} 2024-12-09T11:25:22,867 WARN [BP-1691064892-172.17.0.3-1733743469955 heartbeating to localhost/127.0.0.1:33389 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:25:22,867 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:25:22,867 WARN [BP-1691064892-172.17.0.3-1733743469955 heartbeating to localhost/127.0.0.1:33389 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1691064892-172.17.0.3-1733743469955 (Datanode Uuid 68172ff3-9d70-454e-9f25-f60cc4fa0820) service to localhost/127.0.0.1:33389 2024-12-09T11:25:22,867 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:25:22,868 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/cluster_9dd52bfc-e6ac-fbe4-5558-cc5d09018284/data/data3/current/BP-1691064892-172.17.0.3-1733743469955 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:25:22,868 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/cluster_9dd52bfc-e6ac-fbe4-5558-cc5d09018284/data/data4/current/BP-1691064892-172.17.0.3-1733743469955 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:25:22,868 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:25:22,870 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@50c25230{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:25:22,871 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1736cbce{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:25:22,871 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:25:22,871 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22d895dc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:25:22,871 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@239cfce9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/hadoop.log.dir/,STOPPED} 2024-12-09T11:25:22,873 WARN [BP-1691064892-172.17.0.3-1733743469955 heartbeating to localhost/127.0.0.1:33389 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:25:22,873 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:25:22,873 WARN [BP-1691064892-172.17.0.3-1733743469955 heartbeating to localhost/127.0.0.1:33389 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1691064892-172.17.0.3-1733743469955 (Datanode Uuid 743c9a7a-e6d2-43e0-bfb6-98be5996fd9c) service to localhost/127.0.0.1:33389 2024-12-09T11:25:22,873 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:25:22,873 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/cluster_9dd52bfc-e6ac-fbe4-5558-cc5d09018284/data/data1/current/BP-1691064892-172.17.0.3-1733743469955 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:25:22,874 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/cluster_9dd52bfc-e6ac-fbe4-5558-cc5d09018284/data/data2/current/BP-1691064892-172.17.0.3-1733743469955 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:25:22,874 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:25:22,881 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1411d2ea{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:25:22,881 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@67fc8227{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:25:22,881 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:25:22,882 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21aba41a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:25:22,882 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@602005e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/hadoop.log.dir/,STOPPED} 2024-12-09T11:25:22,888 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T11:25:22,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T11:25:22,921 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 180) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/2dff3a36d44f:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33389 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33389 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33389 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:33389 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33389 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:33389 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33389 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:33389 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:33389 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=331 (was 371), ProcessCount=11 (was 11), AvailableMemoryMB=750 (was 1246) 2024-12-09T11:25:22,929 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=331, ProcessCount=11, AvailableMemoryMB=750 2024-12-09T11:25:22,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T11:25:22,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/hadoop.log.dir so I do NOT create it in target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b 2024-12-09T11:25:22,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/006195d7-6043-24bd-d9c1-c883b4eb37f1/hadoop.tmp.dir so I do NOT create it in target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b 2024-12-09T11:25:22,929 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/cluster_47adba77-69e8-4af2-0418-f524cdf7a90b, deleteOnExit=true 2024-12-09T11:25:22,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/test.cache.data in system properties and HBase conf 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/hadoop.log.dir in system properties and HBase conf 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T11:25:22,930 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T11:25:22,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:25:22,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:25:22,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T11:25:22,931 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/nfs.dump.dir in system properties and HBase conf 2024-12-09T11:25:22,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/java.io.tmpdir in system properties and HBase conf 2024-12-09T11:25:22,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:25:22,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T11:25:22,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T11:25:22,944 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:25:22,965 INFO [regionserver/2dff3a36d44f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:25:23,018 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:25:23,022 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:25:23,023 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:25:23,023 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:25:23,023 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:25:23,024 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:25:23,024 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2fa21789{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:25:23,025 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa998c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:25:23,148 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f5b704d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/java.io.tmpdir/jetty-localhost-45549-hadoop-hdfs-3_4_1-tests_jar-_-any-4939245389886726960/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:25:23,148 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d2a8d11{HTTP/1.1, (http/1.1)}{localhost:45549} 2024-12-09T11:25:23,149 INFO [Time-limited test {}] server.Server(415): Started @244422ms 2024-12-09T11:25:23,161 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:25:23,221 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:25:23,224 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:25:23,225 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:25:23,225 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:25:23,225 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:25:23,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3339c3bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:25:23,226 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f4fe47f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:25:23,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:23,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:23,360 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@676fa0b1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/java.io.tmpdir/jetty-localhost-41043-hadoop-hdfs-3_4_1-tests_jar-_-any-12229559412948098909/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:25:23,360 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1f76160c{HTTP/1.1, (http/1.1)}{localhost:41043} 2024-12-09T11:25:23,361 INFO [Time-limited test {}] server.Server(415): Started @244634ms 2024-12-09T11:25:23,362 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:25:23,415 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:25:23,418 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:25:23,419 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:25:23,419 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:25:23,419 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:25:23,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c9b8b8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:25:23,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a3c5032{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:25:23,494 WARN [Thread-1956 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/cluster_47adba77-69e8-4af2-0418-f524cdf7a90b/data/data1/current/BP-1341352516-172.17.0.3-1733743522950/current, will proceed with Du for space computation calculation, 2024-12-09T11:25:23,494 WARN [Thread-1957 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/cluster_47adba77-69e8-4af2-0418-f524cdf7a90b/data/data2/current/BP-1341352516-172.17.0.3-1733743522950/current, will proceed with Du for space computation calculation, 2024-12-09T11:25:23,518 WARN [Thread-1935 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:25:23,525 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf463577dbc513f10 with lease ID 0x2f2f2e29dad63022: Processing first storage report for DS-b8bca284-c846-423b-bc40-97d22067ca27 from datanode DatanodeRegistration(127.0.0.1:32797, datanodeUuid=39ef2d8f-28f5-4523-a753-d072ae499f26, infoPort=36675, infoSecurePort=0, ipcPort=43107, storageInfo=lv=-57;cid=testClusterID;nsid=1859079938;c=1733743522950) 2024-12-09T11:25:23,525 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf463577dbc513f10 with lease ID 0x2f2f2e29dad63022: from storage DS-b8bca284-c846-423b-bc40-97d22067ca27 node DatanodeRegistration(127.0.0.1:32797, datanodeUuid=39ef2d8f-28f5-4523-a753-d072ae499f26, infoPort=36675, infoSecurePort=0, ipcPort=43107, storageInfo=lv=-57;cid=testClusterID;nsid=1859079938;c=1733743522950), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:25:23,525 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf463577dbc513f10 with lease ID 0x2f2f2e29dad63022: Processing first storage report for DS-ea1e6451-b8c4-4bf9-832f-e5f7ecbbf7da from datanode DatanodeRegistration(127.0.0.1:32797, datanodeUuid=39ef2d8f-28f5-4523-a753-d072ae499f26, infoPort=36675, infoSecurePort=0, ipcPort=43107, storageInfo=lv=-57;cid=testClusterID;nsid=1859079938;c=1733743522950) 2024-12-09T11:25:23,525 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf463577dbc513f10 with lease ID 0x2f2f2e29dad63022: from storage DS-ea1e6451-b8c4-4bf9-832f-e5f7ecbbf7da node DatanodeRegistration(127.0.0.1:32797, datanodeUuid=39ef2d8f-28f5-4523-a753-d072ae499f26, infoPort=36675, infoSecurePort=0, ipcPort=43107, storageInfo=lv=-57;cid=testClusterID;nsid=1859079938;c=1733743522950), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:25:23,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@141cb2d0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/java.io.tmpdir/jetty-localhost-46701-hadoop-hdfs-3_4_1-tests_jar-_-any-9106648710175282608/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:25:23,545 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2671310d{HTTP/1.1, (http/1.1)}{localhost:46701} 2024-12-09T11:25:23,545 INFO [Time-limited test {}] server.Server(415): Started @244818ms 2024-12-09T11:25:23,546 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
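[editor's sketch] The entries above and around this point show HBaseTestingUtil bringing up a mini cluster for TestLogRolling with the logged option StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}. A minimal sketch of how a test might request exactly that topology is below; the class names come from the log itself, but the builder method names are assumptions based on the HBase 2.x/3.x test utility API, not a verified listing.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirror the option object printed in the log: 1 master, 1 region server,
    // 2 HDFS datanodes, 1 ZooKeeper server. Builder method names are assumed.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);     // starts DFS, ZooKeeper, master and region server
    try {
      // ... test body would exercise the cluster here ...
    } finally {
      util.shutdownMiniCluster();      // tears down the cluster and deletes test dirs
    }
  }
}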
2024-12-09T11:25:23,656 WARN [Thread-1982 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/cluster_47adba77-69e8-4af2-0418-f524cdf7a90b/data/data3/current/BP-1341352516-172.17.0.3-1733743522950/current, will proceed with Du for space computation calculation, 2024-12-09T11:25:23,656 WARN [Thread-1983 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/cluster_47adba77-69e8-4af2-0418-f524cdf7a90b/data/data4/current/BP-1341352516-172.17.0.3-1733743522950/current, will proceed with Du for space computation calculation, 2024-12-09T11:25:23,673 WARN [Thread-1971 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T11:25:23,675 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xddc22335cad5e2ba with lease ID 0x2f2f2e29dad63023: Processing first storage report for DS-90230412-964c-4ff6-9fd6-b6f58a9940e2 from datanode DatanodeRegistration(127.0.0.1:39993, datanodeUuid=60082d73-9ac9-40ff-af90-86f55e63cb53, infoPort=36943, infoSecurePort=0, ipcPort=34845, storageInfo=lv=-57;cid=testClusterID;nsid=1859079938;c=1733743522950) 2024-12-09T11:25:23,675 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xddc22335cad5e2ba with lease ID 0x2f2f2e29dad63023: from storage DS-90230412-964c-4ff6-9fd6-b6f58a9940e2 node DatanodeRegistration(127.0.0.1:39993, datanodeUuid=60082d73-9ac9-40ff-af90-86f55e63cb53, infoPort=36943, infoSecurePort=0, ipcPort=34845, storageInfo=lv=-57;cid=testClusterID;nsid=1859079938;c=1733743522950), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:25:23,675 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xddc22335cad5e2ba with lease ID 0x2f2f2e29dad63023: Processing first storage report for DS-55c49341-cb31-4293-8c4c-75b4fb549745 from datanode DatanodeRegistration(127.0.0.1:39993, datanodeUuid=60082d73-9ac9-40ff-af90-86f55e63cb53, infoPort=36943, infoSecurePort=0, ipcPort=34845, storageInfo=lv=-57;cid=testClusterID;nsid=1859079938;c=1733743522950) 2024-12-09T11:25:23,675 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xddc22335cad5e2ba with lease ID 0x2f2f2e29dad63023: from storage DS-55c49341-cb31-4293-8c4c-75b4fb549745 node DatanodeRegistration(127.0.0.1:39993, datanodeUuid=60082d73-9ac9-40ff-af90-86f55e63cb53, infoPort=36943, infoSecurePort=0, ipcPort=34845, storageInfo=lv=-57;cid=testClusterID;nsid=1859079938;c=1733743522950), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:25:23,770 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b 2024-12-09T11:25:23,776 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/cluster_47adba77-69e8-4af2-0418-f524cdf7a90b/zookeeper_0, clientPort=52296, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/cluster_47adba77-69e8-4af2-0418-f524cdf7a90b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/cluster_47adba77-69e8-4af2-0418-f524cdf7a90b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T11:25:23,777 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52296 2024-12-09T11:25:23,778 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:25:23,779 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:25:23,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:25:23,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:25:23,793 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658 with version=8 2024-12-09T11:25:23,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/hbase-staging 2024-12-09T11:25:23,795 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:25:23,795 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:25:23,795 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:25:23,795 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:25:23,795 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:25:23,795 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:25:23,796 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T11:25:23,796 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:25:23,796 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43325 2024-12-09T11:25:23,797 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43325 connecting to ZooKeeper ensemble=127.0.0.1:52296 2024-12-09T11:25:23,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:433250x0, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:25:23,804 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43325-0x1012aee47df0000 connected 2024-12-09T11:25:23,824 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:25:23,825 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:25:23,826 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:25:23,826 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658, hbase.cluster.distributed=false 2024-12-09T11:25:23,828 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:25:23,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43325 2024-12-09T11:25:23,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43325 2024-12-09T11:25:23,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43325 2024-12-09T11:25:23,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43325 2024-12-09T11:25:23,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43325 2024-12-09T11:25:23,844 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:25:23,844 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:25:23,844 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:25:23,844 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:25:23,844 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:25:23,844 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:25:23,844 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T11:25:23,844 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:25:23,845 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38937 2024-12-09T11:25:23,846 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38937 connecting to ZooKeeper ensemble=127.0.0.1:52296 2024-12-09T11:25:23,846 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:25:23,848 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:25:23,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:389370x0, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:25:23,852 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38937-0x1012aee47df0001 connected 2024-12-09T11:25:23,852 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:25:23,852 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T11:25:23,853 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T11:25:23,853 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T11:25:23,854 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:25:23,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38937 2024-12-09T11:25:23,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38937 2024-12-09T11:25:23,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38937 2024-12-09T11:25:23,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38937 2024-12-09T11:25:23,856 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38937 
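[editor's sketch] With the master and region server RPC endpoints above bound and both processes registered on the mini ZooKeeper ensemble at 127.0.0.1:52296, a client in the same JVM could reach the cluster through the standard HBase client API. This is an illustrative sketch, not part of the test; the quorum host and client port are taken from the log, everything else is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // mini ZK host
    conf.set("hbase.zookeeper.property.clientPort", "52296");  // clientPort from the log
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Prints the cluster ID that the master writes to hbase.id during startup.
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}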
2024-12-09T11:25:23,867 DEBUG [M:0;2dff3a36d44f:43325 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2dff3a36d44f:43325 2024-12-09T11:25:23,868 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2dff3a36d44f,43325,1733743523795 2024-12-09T11:25:23,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:25:23,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:25:23,870 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2dff3a36d44f,43325,1733743523795 2024-12-09T11:25:23,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T11:25:23,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:23,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:23,876 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T11:25:23,876 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2dff3a36d44f,43325,1733743523795 from backup master directory 2024-12-09T11:25:23,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2dff3a36d44f,43325,1733743523795 2024-12-09T11:25:23,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:25:23,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:25:23,878 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
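[editor's sketch] The ZKWatcher events above record the master creating its znode under /hbase/backup-masters and then taking /hbase/master. A plain ZooKeeper client pointed at the logged ensemble can confirm the active-master znode; the quorum string and znode path are taken from the log, the rest is illustrative.

import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MasterZNodeCheck {
  public static void main(String[] args) throws Exception {
    // Connect to the mini ZooKeeper ensemble started by the test (port from the log).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:52296", 30000, event -> { });
    try {
      Stat stat = zk.exists("/hbase/master", false);   // null if no active master yet
      System.out.println(stat == null
          ? "no active master znode"
          : "active master znode, ctime=" + stat.getCtime());
    } finally {
      zk.close();
    }
  }
}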
2024-12-09T11:25:23,878 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2dff3a36d44f,43325,1733743523795 2024-12-09T11:25:23,882 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/hbase.id] with ID: fbc0833f-e6e6-4234-a1fb-9ad018b5b461 2024-12-09T11:25:23,882 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/.tmp/hbase.id 2024-12-09T11:25:23,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:25:23,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:25:23,893 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/.tmp/hbase.id]:[hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/hbase.id] 2024-12-09T11:25:23,904 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:25:23,905 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T11:25:23,906 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
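[editor's sketch] The FSUtils lines above write hbase.id to a temporary path and then move it into place, so readers never observe a half-written cluster ID. The same write-then-rename pattern expressed with the plain Hadoop FileSystem API looks roughly like this; the NameNode port matches the log, but the paths and UUID payload are illustrative and not taken from HBase's FSUtils source.

import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:41739");   // NameNode port from the log
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");   // hypothetical paths
    Path dst = new Path("/user/jenkins/test-data/hbase.id");

    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeUTF(UUID.randomUUID().toString());        // cluster ID payload
    }
    // rename() is atomic on HDFS, so the final file appears fully written or not at all.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("failed to move " + tmp + " to " + dst);
    }
  }
}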
2024-12-09T11:25:23,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:23,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:23,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:25:23,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:25:23,925 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:25:23,926 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T11:25:23,927 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:25:23,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:25:23,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:25:23,935 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store 2024-12-09T11:25:23,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:25:23,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:25:23,942 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:25:23,942 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:25:23,942 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:25:23,942 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:25:23,942 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:25:23,942 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:25:23,942 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
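[Editor's note] The table descriptor dumped above (families info/proc/rs/state with per-family VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE) maps directly onto the public HBase 2.x descriptor builders. The sketch below builds equivalent 'info' and 'proc'-style families for a hypothetical user table; it is not how the master creates its internal master:store region, only a hedged illustration of the same attributes.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class DescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes in the log: 3 versions, in-memory,
    // ROWCOL bloom filter, ROW_INDEX_V1 block encoding, 8 KB block size.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();
    // 'proc'-style family: single version, ROW bloom filter, default block size.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_store"))      // hypothetical table name
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}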
2024-12-09T11:25:23,942 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743523942Disabling compacts and flushes for region at 1733743523942Disabling writes for close at 1733743523942Writing region close event to WAL at 1733743523942Closed at 1733743523942 2024-12-09T11:25:23,943 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/.initializing 2024-12-09T11:25:23,943 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/WALs/2dff3a36d44f,43325,1733743523795 2024-12-09T11:25:23,945 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C43325%2C1733743523795, suffix=, logDir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/WALs/2dff3a36d44f,43325,1733743523795, archiveDir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/oldWALs, maxLogs=10 2024-12-09T11:25:23,946 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C43325%2C1733743523795.1733743523945 2024-12-09T11:25:23,950 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/WALs/2dff3a36d44f,43325,1733743523795/2dff3a36d44f%2C43325%2C1733743523795.1733743523945 2024-12-09T11:25:23,951 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36943:36943),(127.0.0.1/127.0.0.1:36675:36675)] 2024-12-09T11:25:23,951 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:25:23,951 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:25:23,951 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:25:23,951 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:25:23,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:25:23,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T11:25:23,954 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:23,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:25:23,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:25:23,955 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T11:25:23,955 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:23,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:25:23,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:25:23,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T11:25:23,957 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:23,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:25:23,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:25:23,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T11:25:23,958 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:23,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:25:23,959 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:25:23,959 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:25:23,960 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:25:23,961 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:25:23,961 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:25:23,961 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T11:25:23,962 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:25:23,964 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:25:23,964 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694311, jitterRate=-0.11713911592960358}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T11:25:23,965 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733743523951Initializing all the Stores at 1733743523952 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743523952Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743523952Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743523952Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743523952Cleaning up temporary data from old regions at 1733743523961 (+9 ms)Region opened successfully at 1733743523965 (+4 ms) 2024-12-09T11:25:23,965 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T11:25:23,968 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42754b6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:25:23,969 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T11:25:23,969 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T11:25:23,969 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T11:25:23,969 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T11:25:23,970 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T11:25:23,970 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T11:25:23,970 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T11:25:23,972 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T11:25:23,973 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T11:25:23,978 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T11:25:23,978 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T11:25:23,979 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T11:25:23,986 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T11:25:23,986 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T11:25:23,987 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T11:25:23,988 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T11:25:23,989 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T11:25:23,991 DEBUG 
[master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T11:25:23,993 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T11:25:23,994 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T11:25:23,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:25:23,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:25:23,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:23,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:23,998 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2dff3a36d44f,43325,1733743523795, sessionid=0x1012aee47df0000, setting cluster-up flag (Was=false) 2024-12-09T11:25:24,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:24,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:24,008 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T11:25:24,009 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,43325,1733743523795 2024-12-09T11:25:24,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:24,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:24,018 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T11:25:24,019 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,43325,1733743523795 2024-12-09T11:25:24,020 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T11:25:24,022 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T11:25:24,022 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T11:25:24,022 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T11:25:24,023 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2dff3a36d44f,43325,1733743523795 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T11:25:24,024 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:25:24,024 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:25:24,024 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:25:24,024 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:25:24,024 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2dff3a36d44f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T11:25:24,024 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:25:24,024 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:25:24,024 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T11:25:24,025 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733743554025 2024-12-09T11:25:24,025 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T11:25:24,025 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T11:25:24,025 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T11:25:24,025 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T11:25:24,025 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T11:25:24,025 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T11:25:24,026 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:25:24,026 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T11:25:24,026 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
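[Editor's note] The CompactionConfiguration(183) records earlier in this startup report the effective compaction settings for each store (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms). A minimal sketch of setting those values through the client Configuration follows; the property keys are the standard hbase-site.xml names as I understand them, so treat them as assumptions to verify against your HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionConfigSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Values mirroring what CompactionConfiguration printed in the log.
    conf.setInt("hbase.hstore.compaction.min", 3);               // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);              // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);        // selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L); // major period, 7 days in ms
    return conf;
  }
}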
2024-12-09T11:25:24,026 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T11:25:24,026 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T11:25:24,026 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T11:25:24,027 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T11:25:24,027 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T11:25:24,027 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:24,027 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743524027,5,FailOnTimeoutGroup] 2024-12-09T11:25:24,027 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T11:25:24,027 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743524027,5,FailOnTimeoutGroup] 2024-12-09T11:25:24,027 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,027 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T11:25:24,027 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,027 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:25:24,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:25:24,033 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T11:25:24,034 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658 2024-12-09T11:25:24,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:25:24,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:25:24,039 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:25:24,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:25:24,041 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:25:24,041 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:24,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:25:24,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:25:24,043 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:25:24,043 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:24,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:25:24,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:25:24,044 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:25:24,044 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:24,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:25:24,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:25:24,046 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:25:24,046 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:24,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:25:24,046 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:25:24,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740 2024-12-09T11:25:24,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740 2024-12-09T11:25:24,048 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:25:24,048 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:25:24,049 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T11:25:24,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:25:24,051 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:25:24,052 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712773, jitterRate=-0.09366247057914734}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:25:24,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733743524039Initializing all the Stores at 1733743524040 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743524040Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743524040Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743524040Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743524040Cleaning up temporary data from old regions at 1733743524048 (+8 ms)Region opened successfully at 1733743524052 (+4 ms) 2024-12-09T11:25:24,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:25:24,053 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:25:24,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:25:24,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:25:24,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:25:24,053 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:25:24,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743524052Disabling compacts and flushes for region at 
1733743524052Disabling writes for close at 1733743524053 (+1 ms)Writing region close event to WAL at 1733743524053Closed at 1733743524053 2024-12-09T11:25:24,054 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:25:24,054 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T11:25:24,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T11:25:24,056 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:25:24,059 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T11:25:24,063 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(746): ClusterId : fbc0833f-e6e6-4234-a1fb-9ad018b5b461 2024-12-09T11:25:24,063 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T11:25:24,065 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T11:25:24,065 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T11:25:24,069 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T11:25:24,069 DEBUG [RS:0;2dff3a36d44f:38937 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@354ee59a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:25:24,085 DEBUG [RS:0;2dff3a36d44f:38937 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2dff3a36d44f:38937 2024-12-09T11:25:24,085 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T11:25:24,085 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T11:25:24,085 DEBUG [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T11:25:24,085 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,43325,1733743523795 with port=38937, startcode=1733743523844 2024-12-09T11:25:24,086 DEBUG [RS:0;2dff3a36d44f:38937 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T11:25:24,088 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47205, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T11:25:24,089 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43325 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:24,089 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43325 {}] master.ServerManager(517): Registering regionserver=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:24,090 DEBUG [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658 2024-12-09T11:25:24,090 DEBUG [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41739 2024-12-09T11:25:24,090 DEBUG [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T11:25:24,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:25:24,093 DEBUG [RS:0;2dff3a36d44f:38937 {}] zookeeper.ZKUtil(111): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:24,093 WARN [RS:0;2dff3a36d44f:38937 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T11:25:24,093 INFO [RS:0;2dff3a36d44f:38937 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:25:24,093 DEBUG [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:24,093 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2dff3a36d44f,38937,1733743523844] 2024-12-09T11:25:24,096 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T11:25:24,098 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T11:25:24,098 INFO [RS:0;2dff3a36d44f:38937 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:25:24,098 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
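[Editor's note] The ZKWatcher lines throughout this startup (NodeCreated on /hbase/running, NodeChildrenChanged on /hbase and /hbase/rs) are ordinary ZooKeeper watch notifications. Below is a hedged, standalone sketch of observing the same paths with the plain ZooKeeper client; the quorum address comes from the log, while the session timeout and class name are illustrative assumptions.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the log; the 30 s session timeout is an arbitrary choice.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:52296", 30_000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // Comparable to ZKWatcher(609): print type, state and path of each notification.
        System.out.println("event type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());
      }
    });
    zk.exists("/hbase/running", true);                         // one-shot watch: fires NodeCreated/NodeDeleted
    List<String> servers = zk.getChildren("/hbase/rs", true);  // watch for region servers joining or leaving
    System.out.println("current region servers: " + servers);
    Thread.sleep(10_000);                                      // keep the session open long enough to see events
    zk.close();
  }
}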
2024-12-09T11:25:24,099 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T11:25:24,100 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T11:25:24,100 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:25:24,100 DEBUG [RS:0;2dff3a36d44f:38937 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:25:24,100 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
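[Editor's note] Once the region server finishes reporting for duty in the records that follow, this single-node test cluster is reachable through the ZooKeeper quorum shown above. A hedged client-side sketch of connecting and listing tables is given below; the quorum and port come from the log, everything else is an assumption about a typical test client and is not part of the logged test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class ClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ZooKeeper quorum/port as logged by this test cluster; adjust for a real deployment.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 52296);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Lists user tables; immediately after startup this is expected to be empty.
      admin.listTableDescriptors().forEach(td -> System.out.println(td.getTableName()));
    }
  }
}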
2024-12-09T11:25:24,100 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,100 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,101 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,101 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,101 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,38937,1733743523844-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:25:24,122 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T11:25:24,122 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,38937,1733743523844-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,122 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,122 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.Replication(171): 2dff3a36d44f,38937,1733743523844 started 2024-12-09T11:25:24,140 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,140 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(1482): Serving as 2dff3a36d44f,38937,1733743523844, RpcServer on 2dff3a36d44f/172.17.0.3:38937, sessionid=0x1012aee47df0001 2024-12-09T11:25:24,140 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T11:25:24,140 DEBUG [RS:0;2dff3a36d44f:38937 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:24,140 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,38937,1733743523844' 2024-12-09T11:25:24,140 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T11:25:24,141 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T11:25:24,141 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T11:25:24,141 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T11:25:24,141 DEBUG [RS:0;2dff3a36d44f:38937 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:24,141 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,38937,1733743523844' 2024-12-09T11:25:24,141 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T11:25:24,141 DEBUG 
[RS:0;2dff3a36d44f:38937 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T11:25:24,142 DEBUG [RS:0;2dff3a36d44f:38937 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T11:25:24,142 INFO [RS:0;2dff3a36d44f:38937 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T11:25:24,142 INFO [RS:0;2dff3a36d44f:38937 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T11:25:24,209 WARN [2dff3a36d44f:43325 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T11:25:24,244 INFO [RS:0;2dff3a36d44f:38937 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C38937%2C1733743523844, suffix=, logDir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844, archiveDir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/oldWALs, maxLogs=32 2024-12-09T11:25:24,245 INFO [RS:0;2dff3a36d44f:38937 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C38937%2C1733743523844.1733743524244 2024-12-09T11:25:24,250 INFO [RS:0;2dff3a36d44f:38937 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844/2dff3a36d44f%2C38937%2C1733743523844.1733743524244 2024-12-09T11:25:24,251 DEBUG [RS:0;2dff3a36d44f:38937 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36675:36675),(127.0.0.1/127.0.0.1:36943:36943)] 2024-12-09T11:25:24,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:24,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:24,460 DEBUG [2dff3a36d44f:43325 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T11:25:24,460 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:24,462 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,38937,1733743523844, state=OPENING 2024-12-09T11:25:24,463 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T11:25:24,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:24,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:25:24,466 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:25:24,466 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:25:24,466 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:25:24,466 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,38937,1733743523844}] 2024-12-09T11:25:24,619 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:25:24,620 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40781, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:25:24,624 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T11:25:24,624 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:25:24,626 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C38937%2C1733743523844.meta, suffix=.meta, logDir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844, archiveDir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/oldWALs, maxLogs=32 2024-12-09T11:25:24,626 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C38937%2C1733743523844.meta.1733743524626.meta 2024-12-09T11:25:24,631 INFO 
[RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844/2dff3a36d44f%2C38937%2C1733743523844.meta.1733743524626.meta 2024-12-09T11:25:24,631 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36675:36675),(127.0.0.1/127.0.0.1:36943:36943)] 2024-12-09T11:25:24,634 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:25:24,635 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T11:25:24,635 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T11:25:24,635 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T11:25:24,635 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T11:25:24,635 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:25:24,635 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T11:25:24,635 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T11:25:24,636 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:25:24,637 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:25:24,637 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:24,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:25:24,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:25:24,638 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:25:24,638 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:24,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:25:24,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:25:24,639 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:25:24,639 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:24,640 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:25:24,640 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:25:24,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:25:24,641 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:24,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:25:24,641 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:25:24,642 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740 2024-12-09T11:25:24,643 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740 2024-12-09T11:25:24,644 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:25:24,644 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:25:24,644 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
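
Editor's note: the last entry above records the FlushLargeStoresPolicy fallback for hbase:meta, which divides the region's memstore flush size by the number of column families when no per-family lower bound is configured. The store openers above list four families (info, ns, rep_barrier, table), and the policy later reports flushSizeLowerBound=16777216, which is consistent with a 64 MB flush size split four ways. The 64 MB figure is inferred from that result, not read from configuration; this is only a worked check of the arithmetic.

// Worked check of the FlushLargeStoresPolicy fallback logged above.
public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        long memstoreFlushHeapSize = 64L * 1024 * 1024; // assumed; inferred from the logged result
        int numFamilies = 4;                            // info, ns, rep_barrier, table
        long lowerBound = memstoreFlushHeapSize / numFamilies;
        // Prints 16777216, matching "FlushLargeStoresPolicy{flushSizeLowerBound=16777216}"
        System.out.println(lowerBound);
    }
}
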
2024-12-09T11:25:24,646 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:25:24,646 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797131, jitterRate=0.013605087995529175}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:25:24,646 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T11:25:24,647 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733743524635Writing region info on filesystem at 1733743524635Initializing all the Stores at 1733743524636 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743524636Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743524636Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743524636Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743524636Cleaning up temporary data from old regions at 1733743524644 (+8 ms)Running coprocessor post-open hooks at 1733743524646 (+2 ms)Region opened successfully at 1733743524647 (+1 ms) 2024-12-09T11:25:24,648 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733743524618 2024-12-09T11:25:24,651 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T11:25:24,651 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T11:25:24,651 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:24,652 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,38937,1733743523844, state=OPEN 2024-12-09T11:25:24,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:25:24,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:25:24,659 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:24,659 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:25:24,659 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:25:24,661 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T11:25:24,661 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,38937,1733743523844 in 193 msec 2024-12-09T11:25:24,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T11:25:24,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-12-09T11:25:24,664 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:25:24,664 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T11:25:24,666 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:25:24,666 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,38937,1733743523844, seqNum=-1] 2024-12-09T11:25:24,666 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:25:24,667 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45659, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:25:24,672 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 649 msec 2024-12-09T11:25:24,672 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733743524672, completionTime=-1 2024-12-09T11:25:24,672 INFO 
[master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T11:25:24,672 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T11:25:24,674 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T11:25:24,674 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733743584674 2024-12-09T11:25:24,674 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733743644674 2024-12-09T11:25:24,674 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-09T11:25:24,674 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43325,1733743523795-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,674 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43325,1733743523795-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,675 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43325,1733743523795-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,675 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2dff3a36d44f:43325, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,675 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,675 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,677 DEBUG [master/2dff3a36d44f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T11:25:24,679 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.801sec 2024-12-09T11:25:24,679 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T11:25:24,679 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T11:25:24,679 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T11:25:24,679 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
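
Editor's note: the recurring Close-WAL-Writer-0 warnings in this section come from RecoverLeaseFSUtils invoking isFileClosed reflectively on a filesystem whose DFS client has already been closed; the underlying IOException ("Filesystem closed") therefore surfaces as the cause of an InvocationTargetException. The sketch below reproduces only that unwrapping pattern with a hypothetical stand-in class; it is not the HBase or HDFS code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Why the warnings above show "InvocationTargetException: null" followed by
// "Caused by: java.io.IOException: Filesystem closed": a reflective call wraps the
// callee's checked exception, and the caller has to unwrap getCause() to see it.
public class ReflectiveIsFileClosedSketch {
    // Hypothetical stand-in for the real filesystem object.
    public static class Fs {
        public boolean isFileClosed(String path) throws IOException {
            // Simulates a client that has already been shut down.
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Fs fs = new Fs();
        Method m = Fs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(fs, "/some/wal/path");
        } catch (InvocationTargetException e) {
            // Same shape as the log entries: reflective wrapper first, real cause underneath.
            System.out.println("invocation failed, cause: " + e.getCause());
        }
    }
}
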
2024-12-09T11:25:24,679 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T11:25:24,679 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43325,1733743523795-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:25:24,679 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43325,1733743523795-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T11:25:24,681 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T11:25:24,681 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T11:25:24,681 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43325,1733743523795-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:25:24,758 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4597b1d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:25:24,758 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2dff3a36d44f,43325,-1 for getting cluster id 2024-12-09T11:25:24,758 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:25:24,759 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fbc0833f-e6e6-4234-a1fb-9ad018b5b461' 2024-12-09T11:25:24,760 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:25:24,760 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fbc0833f-e6e6-4234-a1fb-9ad018b5b461" 2024-12-09T11:25:24,760 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3417e1e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:25:24,760 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2dff3a36d44f,43325,-1] 2024-12-09T11:25:24,760 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:25:24,761 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:25:24,762 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36382, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:25:24,762 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7aa50212, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:25:24,763 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:25:24,763 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,38937,1733743523844, seqNum=-1] 2024-12-09T11:25:24,764 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:25:24,765 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59828, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:25:24,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2dff3a36d44f,43325,1733743523795 2024-12-09T11:25:24,767 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:25:24,769 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T11:25:24,769 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T11:25:24,770 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 2dff3a36d44f,43325,1733743523795 2024-12-09T11:25:24,770 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@71f19645 2024-12-09T11:25:24,770 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T11:25:24,771 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36388, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T11:25:24,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43325 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T11:25:24,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43325 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
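
Editor's note: the TableDescriptorChecker warnings above indicate the test intentionally creates 'TestLogRolling-testLogRolling' with a very small region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes, rolls, and splits trigger quickly. Below is a hedged sketch of how such a descriptor could be built with the public HBase client API; it is an illustration under assumed connection setup, not the test's actual code path.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Sketch of creating a table with the deliberately tiny sizing limits the checker warns about.
public class CreateSmallRegionTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setMaxFileSize(786432L)      // triggers the MAX_FILESIZE warning above
                .setMemStoreFlushSize(8192L)  // triggers the MEMSTORE_FLUSHSIZE warning above
                .build();
            admin.createTable(td);
        }
    }
}
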
2024-12-09T11:25:24,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43325 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:25:24,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43325 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-09T11:25:24,775 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T11:25:24,775 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:24,775 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43325 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-09T11:25:24,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43325 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T11:25:24,776 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T11:25:24,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741835_1011 (size=381) 2024-12-09T11:25:24,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741835_1011 (size=381) 2024-12-09T11:25:24,790 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7f8de01bf71778d802e7a54d45d7719b, NAME => 'TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658 2024-12-09T11:25:24,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741836_1012 (size=64) 2024-12-09T11:25:24,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741836_1012 (size=64) 2024-12-09T11:25:24,801 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:25:24,801 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 7f8de01bf71778d802e7a54d45d7719b, disabling compactions & flushes 2024-12-09T11:25:24,802 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 2024-12-09T11:25:24,802 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 2024-12-09T11:25:24,802 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. after waiting 0 ms 2024-12-09T11:25:24,802 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 2024-12-09T11:25:24,802 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 2024-12-09T11:25:24,802 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7f8de01bf71778d802e7a54d45d7719b: Waiting for close lock at 1733743524801Disabling compacts and flushes for region at 1733743524801Disabling writes for close at 1733743524802 (+1 ms)Writing region close event to WAL at 1733743524802Closed at 1733743524802 2024-12-09T11:25:24,803 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T11:25:24,804 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733743524803"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733743524803"}]},"ts":"1733743524803"} 2024-12-09T11:25:24,806 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
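
Editor's note: the MetaTableAccessor entry above writes the new region's catalog row with "regioninfo" and "state" qualifiers under the info family. As an illustration only, a client could read that row back as sketched below; the row key is copied from the logged Put, while the connection setup is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: read the catalog row written above ("qualifier":"state" under the info family).
public class ReadMetaRowSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.valueOf("hbase:meta"))) {
            byte[] row = Bytes.toBytes(
                "TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.");
            Result r = meta.get(new Get(row)
                .addColumn(Bytes.toBytes("info"), Bytes.toBytes("state")));
            System.out.println("region state bytes: "
                + Bytes.toStringBinary(r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"))));
        }
    }
}
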
2024-12-09T11:25:24,807 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T11:25:24,808 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733743524807"}]},"ts":"1733743524807"} 2024-12-09T11:25:24,810 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-09T11:25:24,810 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7f8de01bf71778d802e7a54d45d7719b, ASSIGN}] 2024-12-09T11:25:24,812 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7f8de01bf71778d802e7a54d45d7719b, ASSIGN 2024-12-09T11:25:24,813 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7f8de01bf71778d802e7a54d45d7719b, ASSIGN; state=OFFLINE, location=2dff3a36d44f,38937,1733743523844; forceNewPlan=false, retain=false 2024-12-09T11:25:24,964 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7f8de01bf71778d802e7a54d45d7719b, regionState=OPENING, regionLocation=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:24,966 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7f8de01bf71778d802e7a54d45d7719b, ASSIGN because future has completed 2024-12-09T11:25:24,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7f8de01bf71778d802e7a54d45d7719b, server=2dff3a36d44f,38937,1733743523844}] 2024-12-09T11:25:25,124 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 
2024-12-09T11:25:25,124 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7f8de01bf71778d802e7a54d45d7719b, NAME => 'TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:25:25,124 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:25,124 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:25:25,124 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:25,125 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:25,126 INFO [StoreOpener-7f8de01bf71778d802e7a54d45d7719b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:25,128 INFO [StoreOpener-7f8de01bf71778d802e7a54d45d7719b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7f8de01bf71778d802e7a54d45d7719b columnFamilyName info 2024-12-09T11:25:25,128 DEBUG [StoreOpener-7f8de01bf71778d802e7a54d45d7719b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:25,128 INFO [StoreOpener-7f8de01bf71778d802e7a54d45d7719b-1 {}] regionserver.HStore(327): Store=7f8de01bf71778d802e7a54d45d7719b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:25:25,129 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:25,129 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:25,130 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:25,130 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:25,130 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:25,132 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:25,134 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:25:25,136 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7f8de01bf71778d802e7a54d45d7719b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694324, jitterRate=-0.11712172627449036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:25:25,136 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:25,137 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7f8de01bf71778d802e7a54d45d7719b: Running coprocessor pre-open hook at 1733743525125Writing region info on filesystem at 1733743525125Initializing all the Stores at 1733743525125Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743525125Cleaning up temporary data from old regions at 1733743525130 (+5 ms)Running coprocessor post-open hooks at 1733743525136 (+6 ms)Region opened successfully at 1733743525137 (+1 ms) 2024-12-09T11:25:25,138 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., pid=6, masterSystemTime=1733743525120 2024-12-09T11:25:25,141 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 
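
Editor's note: the open journal above shows the new region finishing its open sequence (next sequenceid=2, recovered.edits/1.seqid written). From this point an ordinary client write against the 'info' family is enough to start producing the WAL traffic the log-rolling test exercises. The sketch below is an assumed, minimal usage example; the row, qualifier, and value names are made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a single put against the freshly opened region's 'info' family.
public class WriteToTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
            Put put = new Put(Bytes.toBytes("row-0001"));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value-0001"));
            table.put(put);
        }
    }
}
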
2024-12-09T11:25:25,141 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 2024-12-09T11:25:25,142 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7f8de01bf71778d802e7a54d45d7719b, regionState=OPEN, openSeqNum=2, regionLocation=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:25,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7f8de01bf71778d802e7a54d45d7719b, server=2dff3a36d44f,38937,1733743523844 because future has completed 2024-12-09T11:25:25,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T11:25:25,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7f8de01bf71778d802e7a54d45d7719b, server=2dff3a36d44f,38937,1733743523844 in 179 msec 2024-12-09T11:25:25,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T11:25:25,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7f8de01bf71778d802e7a54d45d7719b, ASSIGN in 339 msec 2024-12-09T11:25:25,154 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T11:25:25,154 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733743525154"}]},"ts":"1733743525154"} 2024-12-09T11:25:25,156 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-09T11:25:25,157 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T11:25:25,159 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 386 msec 2024-12-09T11:25:25,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:25,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:26,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-09T11:25:26,171 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T11:25:26,172 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T11:25:26,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:26,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:27,030 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,030 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,055 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:27,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:27,561 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T11:25:27,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,596 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:27,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:28,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:28,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:29,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:29,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:30,097 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T11:25:30,097 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-09T11:25:30,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:30,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:31,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:31,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:32,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:32,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:33,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:33,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:34,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:34,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-09T11:25:34,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43325 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T11:25:34,796 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-12-09T11:25:34,796 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-12-09T11:25:34,799 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-12-09T11:25:34,799 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.
2024-12-09T11:25:34,802 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., hostname=2dff3a36d44f,38937,1733743523844, seqNum=2]
2024-12-09T11:25:34,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 7f8de01bf71778d802e7a54d45d7719b
2024-12-09T11:25:34,813 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7f8de01bf71778d802e7a54d45d7719b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T11:25:34,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/7461b459cc7141e2ba0235eafddc6448 is 1080, key is row0001/info:/1733743534803/Put/seqid=0
2024-12-09T11:25:34,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741837_1013 (size=12509)
2024-12-09T11:25:34,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741837_1013 (size=12509)
2024-12-09T11:25:34,844 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/7461b459cc7141e2ba0235eafddc6448
2024-12-09T11:25:34,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/7461b459cc7141e2ba0235eafddc6448 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/7461b459cc7141e2ba0235eafddc6448
2024-12-09T11:25:34,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38937 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=7f8de01bf71778d802e7a54d45d7719b, server=2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T11:25:34,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38937 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:59828 deadline: 1733743544852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=7f8de01bf71778d802e7a54d45d7719b, server=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:34,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/7461b459cc7141e2ba0235eafddc6448, entries=7, sequenceid=11, filesize=12.2 K 2024-12-09T11:25:34,859 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 7f8de01bf71778d802e7a54d45d7719b in 46ms, sequenceid=11, compaction requested=false 2024-12-09T11:25:34,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7f8de01bf71778d802e7a54d45d7719b: 2024-12-09T11:25:34,860 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., hostname=2dff3a36d44f,38937,1733743523844, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., hostname=2dff3a36d44f,38937,1733743523844, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=7f8de01bf71778d802e7a54d45d7719b, server=2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:25:34,861 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., hostname=2dff3a36d44f,38937,1733743523844, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=7f8de01bf71778d802e7a54d45d7719b, server=2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:25:34,861 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., hostname=2dff3a36d44f,38937,1733743523844, seqNum=2 because the exception is null or not the one we care about 2024-12-09T11:25:35,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:35,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:36,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:36,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:37,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:37,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:38,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:38,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:39,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:39,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:40,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:40,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:41,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:41,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:42,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:42,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:43,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:43,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:44,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:44,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:44,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:44,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7f8de01bf71778d802e7a54d45d7719b 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-09T11:25:44,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/04186ca5b4134df6b573169167873542 is 1080, key is row0008/info:/1733743534814/Put/seqid=0 2024-12-09T11:25:44,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741838_1014 (size=29761) 2024-12-09T11:25:44,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741838_1014 (size=29761) 2024-12-09T11:25:44,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/04186ca5b4134df6b573169167873542 2024-12-09T11:25:44,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/04186ca5b4134df6b573169167873542 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/04186ca5b4134df6b573169167873542 2024-12-09T11:25:44,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/04186ca5b4134df6b573169167873542, entries=23, sequenceid=37, filesize=29.1 K 2024-12-09T11:25:44,982 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 7f8de01bf71778d802e7a54d45d7719b in 24ms, sequenceid=37, compaction requested=false 2024-12-09T11:25:44,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7f8de01bf71778d802e7a54d45d7719b: 2024-12-09T11:25:44,982 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-12-09T11:25:44,982 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:44,982 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/04186ca5b4134df6b573169167873542 because midkey is the same as first or last row 2024-12-09T11:25:45,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:45,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:46,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:46,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:46,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:46,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7f8de01bf71778d802e7a54d45d7719b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T11:25:46,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/fc1ff2eb4d864941a9682c7f03a4f3c4 is 1080, key is row0031/info:/1733743544959/Put/seqid=0 2024-12-09T11:25:46,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741839_1015 (size=12509) 2024-12-09T11:25:46,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741839_1015 (size=12509) 2024-12-09T11:25:46,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/fc1ff2eb4d864941a9682c7f03a4f3c4 2024-12-09T11:25:46,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/fc1ff2eb4d864941a9682c7f03a4f3c4 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/fc1ff2eb4d864941a9682c7f03a4f3c4 2024-12-09T11:25:47,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/fc1ff2eb4d864941a9682c7f03a4f3c4, entries=7, sequenceid=47, filesize=12.2 K 2024-12-09T11:25:47,006 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 7f8de01bf71778d802e7a54d45d7719b in 36ms, sequenceid=47, compaction requested=true 2024-12-09T11:25:47,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7f8de01bf71778d802e7a54d45d7719b: 2024-12-09T11:25:47,006 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-12-09T11:25:47,006 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:47,007 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/04186ca5b4134df6b573169167873542 because midkey is the same as first or last row 2024-12-09T11:25:47,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:47,007 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f8de01bf71778d802e7a54d45d7719b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T11:25:47,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:25:47,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7f8de01bf71778d802e7a54d45d7719b 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-12-09T11:25:47,008 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T11:25:47,009 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T11:25:47,009 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1541): 7f8de01bf71778d802e7a54d45d7719b/info is initiating minor compaction (all files) 2024-12-09T11:25:47,009 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7f8de01bf71778d802e7a54d45d7719b/info in TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 2024-12-09T11:25:47,010 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/7461b459cc7141e2ba0235eafddc6448, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/04186ca5b4134df6b573169167873542, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/fc1ff2eb4d864941a9682c7f03a4f3c4] into tmpdir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp, totalSize=53.5 K 2024-12-09T11:25:47,010 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7461b459cc7141e2ba0235eafddc6448, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733743534803 2024-12-09T11:25:47,012 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting 04186ca5b4134df6b573169167873542, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733743534814 2024-12-09T11:25:47,012 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting fc1ff2eb4d864941a9682c7f03a4f3c4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733743544959 2024-12-09T11:25:47,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/87dc30d415e8402b8bb9ff37aab1d72e is 1080, key is row0038/info:/1733743546971/Put/seqid=0 
2024-12-09T11:25:47,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741840_1016 (size=26530) 2024-12-09T11:25:47,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741840_1016 (size=26530) 2024-12-09T11:25:47,040 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/87dc30d415e8402b8bb9ff37aab1d72e 2024-12-09T11:25:47,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/87dc30d415e8402b8bb9ff37aab1d72e as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/87dc30d415e8402b8bb9ff37aab1d72e 2024-12-09T11:25:47,057 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f8de01bf71778d802e7a54d45d7719b#info#compaction#59 average throughput is 9.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:25:47,057 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/a08fe8ca15bd4a969ca22a3ccc6d6eec is 1080, key is row0001/info:/1733743534803/Put/seqid=0 2024-12-09T11:25:47,066 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/87dc30d415e8402b8bb9ff37aab1d72e, entries=20, sequenceid=70, filesize=25.9 K 2024-12-09T11:25:47,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=7.36 KB/7532 for 7f8de01bf71778d802e7a54d45d7719b in 62ms, sequenceid=70, compaction requested=false 2024-12-09T11:25:47,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7f8de01bf71778d802e7a54d45d7719b: 2024-12-09T11:25:47,069 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=79.4 K, sizeToCheck=16.0 K 2024-12-09T11:25:47,069 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:47,069 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/04186ca5b4134df6b573169167873542 because midkey is the same as first or last row 2024-12-09T11:25:47,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741841_1017 (size=44978) 2024-12-09T11:25:47,092 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741841_1017 (size=44978) 2024-12-09T11:25:47,099 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/a08fe8ca15bd4a969ca22a3ccc6d6eec as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/a08fe8ca15bd4a969ca22a3ccc6d6eec 2024-12-09T11:25:47,105 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7f8de01bf71778d802e7a54d45d7719b/info of 7f8de01bf71778d802e7a54d45d7719b into a08fe8ca15bd4a969ca22a3ccc6d6eec(size=43.9 K), total size for store is 69.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T11:25:47,105 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7f8de01bf71778d802e7a54d45d7719b: 2024-12-09T11:25:47,105 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., storeName=7f8de01bf71778d802e7a54d45d7719b/info, priority=13, startTime=1733743547007; duration=0sec 2024-12-09T11:25:47,106 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.8 K, sizeToCheck=16.0 K 2024-12-09T11:25:47,106 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:47,106 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/a08fe8ca15bd4a969ca22a3ccc6d6eec because midkey is the same as first or last row 2024-12-09T11:25:47,106 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.8 K, sizeToCheck=16.0 K 2024-12-09T11:25:47,106 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:47,106 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/a08fe8ca15bd4a969ca22a3ccc6d6eec because midkey is the same as first or last row 2024-12-09T11:25:47,106 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.8 K, sizeToCheck=16.0 K 2024-12-09T11:25:47,106 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:47,106 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/a08fe8ca15bd4a969ca22a3ccc6d6eec because midkey is the same as first or last row 2024-12-09T11:25:47,106 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:25:47,106 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f8de01bf71778d802e7a54d45d7719b:info 2024-12-09T11:25:47,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:47,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:48,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:48,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:49,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7f8de01bf71778d802e7a54d45d7719b 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-09T11:25:49,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/cdad49b8a50347bf96aaa27fcf971428 is 1080, key is row0058/info:/1733743547008/Put/seqid=0 2024-12-09T11:25:49,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741842_1018 (size=13586) 2024-12-09T11:25:49,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741842_1018 (size=13586) 2024-12-09T11:25:49,041 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/cdad49b8a50347bf96aaa27fcf971428 2024-12-09T11:25:49,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/cdad49b8a50347bf96aaa27fcf971428 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/cdad49b8a50347bf96aaa27fcf971428 2024-12-09T11:25:49,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/cdad49b8a50347bf96aaa27fcf971428, entries=8, sequenceid=82, filesize=13.3 K 2024-12-09T11:25:49,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=12.61 KB/12912 for 7f8de01bf71778d802e7a54d45d7719b in 24ms, sequenceid=82, compaction requested=true 2024-12-09T11:25:49,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7f8de01bf71778d802e7a54d45d7719b: 2024-12-09T11:25:49,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,054 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-12-09T11:25:49,054 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:49,054 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/a08fe8ca15bd4a969ca22a3ccc6d6eec because midkey is the same as first or last row 2024-12-09T11:25:49,055 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f8de01bf71778d802e7a54d45d7719b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T11:25:49,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:25:49,055 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T11:25:49,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7f8de01bf71778d802e7a54d45d7719b 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-09T11:25:49,056 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T11:25:49,056 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HStore(1541): 7f8de01bf71778d802e7a54d45d7719b/info is initiating minor compaction (all files) 2024-12-09T11:25:49,056 INFO [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7f8de01bf71778d802e7a54d45d7719b/info in TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 2024-12-09T11:25:49,057 INFO [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/a08fe8ca15bd4a969ca22a3ccc6d6eec, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/87dc30d415e8402b8bb9ff37aab1d72e, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/cdad49b8a50347bf96aaa27fcf971428] into tmpdir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp, totalSize=83.1 K 2024-12-09T11:25:49,058 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] compactions.Compactor(225): Compacting a08fe8ca15bd4a969ca22a3ccc6d6eec, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733743534803 2024-12-09T11:25:49,058 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] compactions.Compactor(225): Compacting 87dc30d415e8402b8bb9ff37aab1d72e, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=70, earliestPutTs=1733743546971 2024-12-09T11:25:49,059 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] compactions.Compactor(225): Compacting cdad49b8a50347bf96aaa27fcf971428, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733743547008 2024-12-09T11:25:49,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/e3a546bd3831452080a7e9062b233da8 is 1080, key is row0066/info:/1733743549032/Put/seqid=0 
2024-12-09T11:25:49,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741843_1019 (size=18987) 2024-12-09T11:25:49,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741843_1019 (size=18987) 2024-12-09T11:25:49,069 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/e3a546bd3831452080a7e9062b233da8 2024-12-09T11:25:49,074 INFO [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f8de01bf71778d802e7a54d45d7719b#info#compaction#62 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:25:49,074 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/05cf5728c2124d9ebf8261c2df7d9789 is 1080, key is row0001/info:/1733743534803/Put/seqid=0 2024-12-09T11:25:49,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/e3a546bd3831452080a7e9062b233da8 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/e3a546bd3831452080a7e9062b233da8 2024-12-09T11:25:49,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741844_1020 (size=75378) 2024-12-09T11:25:49,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741844_1020 (size=75378) 2024-12-09T11:25:49,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/e3a546bd3831452080a7e9062b233da8, entries=13, sequenceid=98, filesize=18.5 K 2024-12-09T11:25:49,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=14.71 KB/15064 for 7f8de01bf71778d802e7a54d45d7719b in 29ms, sequenceid=98, compaction requested=false 2024-12-09T11:25:49,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7f8de01bf71778d802e7a54d45d7719b: 2024-12-09T11:25:49,085 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.6 K, sizeToCheck=16.0 K 2024-12-09T11:25:49,085 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:49,085 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/a08fe8ca15bd4a969ca22a3ccc6d6eec because midkey is the same as first or last row 2024-12-09T11:25:49,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7f8de01bf71778d802e7a54d45d7719b 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-09T11:25:49,088 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/05cf5728c2124d9ebf8261c2df7d9789 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/05cf5728c2124d9ebf8261c2df7d9789 2024-12-09T11:25:49,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/53706db6c1984772b1cba29c018879ef is 1080, key is row0079/info:/1733743549056/Put/seqid=0 2024-12-09T11:25:49,096 INFO [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7f8de01bf71778d802e7a54d45d7719b/info of 7f8de01bf71778d802e7a54d45d7719b into 05cf5728c2124d9ebf8261c2df7d9789(size=73.6 K), total size for store is 92.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T11:25:49,096 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7f8de01bf71778d802e7a54d45d7719b: 2024-12-09T11:25:49,096 INFO [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., storeName=7f8de01bf71778d802e7a54d45d7719b/info, priority=13, startTime=1733743549054; duration=0sec 2024-12-09T11:25:49,096 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-12-09T11:25:49,096 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:49,096 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-12-09T11:25:49,096 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:49,096 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=92.2 K, sizeToCheck=16.0 K 2024-12-09T11:25:49,096 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:49,097 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:25:49,097 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:25:49,097 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f8de01bf71778d802e7a54d45d7719b:info 2024-12-09T11:25:49,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741845_1021 (size=21141) 2024-12-09T11:25:49,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741845_1021 (size=21141) 2024-12-09T11:25:49,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/53706db6c1984772b1cba29c018879ef 2024-12-09T11:25:49,099 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43325 {}] assignment.AssignmentManager(1363): Split request from 2dff3a36d44f,38937,1733743523844, parent={ENCODED => 7f8de01bf71778d802e7a54d45d7719b, NAME => 'TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-09T11:25:49,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/53706db6c1984772b1cba29c018879ef as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/53706db6c1984772b1cba29c018879ef 2024-12-09T11:25:49,105 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43325 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:49,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/53706db6c1984772b1cba29c018879ef, entries=15, sequenceid=116, filesize=20.6 K 2024-12-09T11:25:49,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=3.15 KB/3228 for 7f8de01bf71778d802e7a54d45d7719b in 26ms, sequenceid=116, compaction requested=true 2024-12-09T11:25:49,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7f8de01bf71778d802e7a54d45d7719b: 2024-12-09T11:25:49,111 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-12-09T11:25:49,111 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:49,111 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-12-09T11:25:49,111 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:49,111 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-12-09T11:25:49,111 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T11:25:49,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-12-09T11:25:49,111 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43325 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7f8de01bf71778d802e7a54d45d7719b, daughterA=a6e33672d96dcec117787729bb2f311f, daughterB=51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,112 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7f8de01bf71778d802e7a54d45d7719b, daughterA=a6e33672d96dcec117787729bb2f311f, daughterB=51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,112 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, 
parent=7f8de01bf71778d802e7a54d45d7719b, daughterA=a6e33672d96dcec117787729bb2f311f, daughterB=51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,112 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7f8de01bf71778d802e7a54d45d7719b, daughterA=a6e33672d96dcec117787729bb2f311f, daughterB=51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,113 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43325 {}] assignment.AssignmentManager(1363): Split request from 2dff3a36d44f,38937,1733743523844, parent={ENCODED => 7f8de01bf71778d802e7a54d45d7719b, NAME => 'TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-09T11:25:49,114 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43325 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:49,115 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43325 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7f8de01bf71778d802e7a54d45d7719b, daughterA=e17878d45783a2e91ae18aeddcb47834, daughterB=5449bde4413bfae53ddcc6e86432396d 2024-12-09T11:25:49,115 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7f8de01bf71778d802e7a54d45d7719b, daughterA=e17878d45783a2e91ae18aeddcb47834, daughterB=5449bde4413bfae53ddcc6e86432396d held by pid=7 2024-12-09T11:25:49,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7f8de01bf71778d802e7a54d45d7719b, UNASSIGN}] 2024-12-09T11:25:49,123 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-12-09T11:25:49,123 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7f8de01bf71778d802e7a54d45d7719b, UNASSIGN 2024-12-09T11:25:49,123 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7f8de01bf71778d802e7a54d45d7719b, daughterA=e17878d45783a2e91ae18aeddcb47834, daughterB=5449bde4413bfae53ddcc6e86432396d 2024-12-09T11:25:49,124 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=7f8de01bf71778d802e7a54d45d7719b, regionState=CLOSING, regionLocation=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:49,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 
{}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7f8de01bf71778d802e7a54d45d7719b, UNASSIGN because future has completed 2024-12-09T11:25:49,127 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-09T11:25:49,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7f8de01bf71778d802e7a54d45d7719b, server=2dff3a36d44f,38937,1733743523844}] 2024-12-09T11:25:49,285 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,285 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-09T11:25:49,286 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing 7f8de01bf71778d802e7a54d45d7719b, disabling compactions & flushes 2024-12-09T11:25:49,286 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 2024-12-09T11:25:49,286 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 2024-12-09T11:25:49,286 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. after waiting 0 ms 2024-12-09T11:25:49,286 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 
2024-12-09T11:25:49,286 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing 7f8de01bf71778d802e7a54d45d7719b 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-09T11:25:49,290 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/eb6908f0809547dabbc5d787ea1333f9 is 1080, key is row0094/info:/1733743549086/Put/seqid=0 2024-12-09T11:25:49,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741846_1022 (size=8193) 2024-12-09T11:25:49,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741846_1022 (size=8193) 2024-12-09T11:25:49,295 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/eb6908f0809547dabbc5d787ea1333f9 2024-12-09T11:25:49,300 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/.tmp/info/eb6908f0809547dabbc5d787ea1333f9 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/eb6908f0809547dabbc5d787ea1333f9 2024-12-09T11:25:49,304 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/eb6908f0809547dabbc5d787ea1333f9, entries=3, sequenceid=123, filesize=8.0 K 2024-12-09T11:25:49,305 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 7f8de01bf71778d802e7a54d45d7719b in 19ms, sequenceid=123, compaction requested=true 2024-12-09T11:25:49,306 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/7461b459cc7141e2ba0235eafddc6448, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/04186ca5b4134df6b573169167873542, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/a08fe8ca15bd4a969ca22a3ccc6d6eec, 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/fc1ff2eb4d864941a9682c7f03a4f3c4, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/87dc30d415e8402b8bb9ff37aab1d72e, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/cdad49b8a50347bf96aaa27fcf971428] to archive 2024-12-09T11:25:49,307 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T11:25:49,309 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/7461b459cc7141e2ba0235eafddc6448 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/7461b459cc7141e2ba0235eafddc6448 2024-12-09T11:25:49,310 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/04186ca5b4134df6b573169167873542 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/04186ca5b4134df6b573169167873542 2024-12-09T11:25:49,311 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/a08fe8ca15bd4a969ca22a3ccc6d6eec to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/a08fe8ca15bd4a969ca22a3ccc6d6eec 2024-12-09T11:25:49,312 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/fc1ff2eb4d864941a9682c7f03a4f3c4 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/fc1ff2eb4d864941a9682c7f03a4f3c4 2024-12-09T11:25:49,313 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/87dc30d415e8402b8bb9ff37aab1d72e to 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/87dc30d415e8402b8bb9ff37aab1d72e 2024-12-09T11:25:49,314 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/cdad49b8a50347bf96aaa27fcf971428 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/cdad49b8a50347bf96aaa27fcf971428 2024-12-09T11:25:49,320 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-12-09T11:25:49,321 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 2024-12-09T11:25:49,321 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for 7f8de01bf71778d802e7a54d45d7719b: Waiting for close lock at 1733743549286Running coprocessor pre-close hooks at 1733743549286Disabling compacts and flushes for region at 1733743549286Disabling writes for close at 1733743549286Obtaining lock to block concurrent updates at 1733743549286Preparing flush snapshotting stores in 7f8de01bf71778d802e7a54d45d7719b at 1733743549286Finished memstore snapshotting TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733743549286Flushing stores of TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 
at 1733743549287 (+1 ms)Flushing 7f8de01bf71778d802e7a54d45d7719b/info: creating writer at 1733743549287Flushing 7f8de01bf71778d802e7a54d45d7719b/info: appending metadata at 1733743549290 (+3 ms)Flushing 7f8de01bf71778d802e7a54d45d7719b/info: closing flushed file at 1733743549290Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4159d08c: reopening flushed file at 1733743549299 (+9 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 7f8de01bf71778d802e7a54d45d7719b in 19ms, sequenceid=123, compaction requested=true at 1733743549305 (+6 ms)Writing region close event to WAL at 1733743549317 (+12 ms)Running coprocessor post-close hooks at 1733743549320 (+3 ms)Closed at 1733743549320 2024-12-09T11:25:49,323 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,323 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=7f8de01bf71778d802e7a54d45d7719b, regionState=CLOSED 2024-12-09T11:25:49,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7f8de01bf71778d802e7a54d45d7719b, server=2dff3a36d44f,38937,1733743523844 because future has completed 2024-12-09T11:25:49,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:49,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:49,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T11:25:49,329 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure 7f8de01bf71778d802e7a54d45d7719b, server=2dff3a36d44f,38937,1733743523844 in 199 msec 2024-12-09T11:25:49,331 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-09T11:25:49,331 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7f8de01bf71778d802e7a54d45d7719b, UNASSIGN in 209 msec 2024-12-09T11:25:49,338 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:49,342 INFO [PEWorker-1 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=7f8de01bf71778d802e7a54d45d7719b, threads=4 2024-12-09T11:25:49,344 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/05cf5728c2124d9ebf8261c2df7d9789 for region: 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,346 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/53706db6c1984772b1cba29c018879ef for region: 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,346 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/eb6908f0809547dabbc5d787ea1333f9 for region: 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,346 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/e3a546bd3831452080a7e9062b233da8 for region: 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,358 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/eb6908f0809547dabbc5d787ea1333f9, top=true 2024-12-09T11:25:49,359 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/e3a546bd3831452080a7e9062b233da8, top=true 2024-12-09T11:25:49,372 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/53706db6c1984772b1cba29c018879ef, top=true 2024-12-09T11:25:49,389 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-e3a546bd3831452080a7e9062b233da8 for child: 51c71c438277672a7babb8bcc8349f6a, parent: 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,389 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-eb6908f0809547dabbc5d787ea1333f9 for child: 51c71c438277672a7babb8bcc8349f6a, parent: 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,389 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/eb6908f0809547dabbc5d787ea1333f9 for region: 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,389 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/e3a546bd3831452080a7e9062b233da8 for region: 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741847_1023 (size=27) 2024-12-09T11:25:49,390 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-53706db6c1984772b1cba29c018879ef for child: 51c71c438277672a7babb8bcc8349f6a, parent: 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,390 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/53706db6c1984772b1cba29c018879ef for region: 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741847_1023 (size=27) 2024-12-09T11:25:49,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741848_1024 (size=27) 2024-12-09T11:25:49,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741848_1024 (size=27) 2024-12-09T11:25:49,405 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 
splitting complete for store file: hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/05cf5728c2124d9ebf8261c2df7d9789 for region: 7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:25:49,407 DEBUG [PEWorker-1 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 7f8de01bf71778d802e7a54d45d7719b Daughter A: [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f/info/05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b] storefiles, Daughter B: [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-53706db6c1984772b1cba29c018879ef, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-e3a546bd3831452080a7e9062b233da8, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-eb6908f0809547dabbc5d787ea1333f9] storefiles. 2024-12-09T11:25:49,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741849_1025 (size=71) 2024-12-09T11:25:49,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741849_1025 (size=71) 2024-12-09T11:25:49,417 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:49,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741850_1026 (size=71) 2024-12-09T11:25:49,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741850_1026 (size=71) 2024-12-09T11:25:49,430 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:49,442 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-12-09T11:25:49,445 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-12-09T11:25:49,448 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733743549447"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733743549447"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733743549447"}]},"ts":"1733743549447"} 2024-12-09T11:25:49,448 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733743549447"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733743549447"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733743549447"}]},"ts":"1733743549447"} 2024-12-09T11:25:49,448 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733743549447"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733743549447"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733743549447"}]},"ts":"1733743549447"} 2024-12-09T11:25:49,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a6e33672d96dcec117787729bb2f311f, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=51c71c438277672a7babb8bcc8349f6a, ASSIGN}] 2024-12-09T11:25:49,471 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a6e33672d96dcec117787729bb2f311f, ASSIGN 2024-12-09T11:25:49,472 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=51c71c438277672a7babb8bcc8349f6a, ASSIGN 2024-12-09T11:25:49,472 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a6e33672d96dcec117787729bb2f311f, ASSIGN; state=SPLITTING_NEW, location=2dff3a36d44f,38937,1733743523844; forceNewPlan=false, retain=false 2024-12-09T11:25:49,473 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=51c71c438277672a7babb8bcc8349f6a, ASSIGN; state=SPLITTING_NEW, location=2dff3a36d44f,38937,1733743523844; forceNewPlan=false, retain=false 2024-12-09T11:25:49,625 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=51c71c438277672a7babb8bcc8349f6a, regionState=OPENING, regionLocation=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:49,625 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta 
row=a6e33672d96dcec117787729bb2f311f, regionState=OPENING, regionLocation=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:49,627 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a6e33672d96dcec117787729bb2f311f, ASSIGN because future has completed 2024-12-09T11:25:49,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure a6e33672d96dcec117787729bb2f311f, server=2dff3a36d44f,38937,1733743523844}] 2024-12-09T11:25:49,628 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=51c71c438277672a7babb8bcc8349f6a, ASSIGN because future has completed 2024-12-09T11:25:49,629 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844}] 2024-12-09T11:25:49,784 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 2024-12-09T11:25:49,784 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => 51c71c438277672a7babb8bcc8349f6a, NAME => 'TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-09T11:25:49,785 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,785 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:25:49,785 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,785 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,786 INFO [StoreOpener-51c71c438277672a7babb8bcc8349f6a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,787 INFO [StoreOpener-51c71c438277672a7babb8bcc8349f6a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51c71c438277672a7babb8bcc8349f6a columnFamilyName info 2024-12-09T11:25:49,787 DEBUG [StoreOpener-51c71c438277672a7babb8bcc8349f6a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:49,799 DEBUG [StoreOpener-51c71c438277672a7babb8bcc8349f6a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b->hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/05cf5728c2124d9ebf8261c2df7d9789-top 2024-12-09T11:25:49,803 DEBUG [StoreOpener-51c71c438277672a7babb8bcc8349f6a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-53706db6c1984772b1cba29c018879ef 2024-12-09T11:25:49,807 DEBUG [StoreOpener-51c71c438277672a7babb8bcc8349f6a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-e3a546bd3831452080a7e9062b233da8 2024-12-09T11:25:49,811 DEBUG [StoreOpener-51c71c438277672a7babb8bcc8349f6a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-eb6908f0809547dabbc5d787ea1333f9 2024-12-09T11:25:49,811 INFO [StoreOpener-51c71c438277672a7babb8bcc8349f6a-1 {}] regionserver.HStore(327): Store=51c71c438277672a7babb8bcc8349f6a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:25:49,812 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,812 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,813 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,814 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,814 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,816 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,816 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened 51c71c438277672a7babb8bcc8349f6a; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777637, jitterRate=-0.011184364557266235}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:25:49,816 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:25:49,817 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for 51c71c438277672a7babb8bcc8349f6a: Running coprocessor pre-open hook at 1733743549785Writing region info on filesystem at 1733743549785Initializing all the Stores at 1733743549786 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743549786Cleaning up temporary data from old regions at 1733743549814 (+28 ms)Running coprocessor post-open hooks at 1733743549816 (+2 ms)Region opened successfully at 1733743549817 (+1 ms) 2024-12-09T11:25:49,818 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., pid=14, masterSystemTime=1733743549780 2024-12-09T11:25:49,818 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store 51c71c438277672a7babb8bcc8349f6a:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T11:25:49,818 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:25:49,818 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T11:25:49,820 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs 
to recently split daughter region TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 2024-12-09T11:25:49,820 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1541): 51c71c438277672a7babb8bcc8349f6a/info is initiating minor compaction (all files) 2024-12-09T11:25:49,820 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 51c71c438277672a7babb8bcc8349f6a/info in TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 2024-12-09T11:25:49,820 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b->hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/05cf5728c2124d9ebf8261c2df7d9789-top, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-e3a546bd3831452080a7e9062b233da8, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-53706db6c1984772b1cba29c018879ef, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-eb6908f0809547dabbc5d787ea1333f9] into tmpdir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp, totalSize=120.8 K 2024-12-09T11:25:49,820 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 2024-12-09T11:25:49,820 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 2024-12-09T11:25:49,821 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f. 
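At this point the daughter 51c71c438277672a7babb8bcc8349f6a is open but still reads the parent's data indirectly: its info store holds a "-top" half reference to the parent's 05cf5728... file plus HFileLink files pointing back at the parent's HFiles, and a compaction of all of them has just been queued at boosted priority so the daughter can rewrite everything into files of its own. The idea behind a half reference is small enough to show in a few lines; the sketch below is purely illustrative and is not HBase's actual Reference/HFileLink implementation.

enum Half { TOP, BOTTOM }

/**
 * Illustrative stand-in for a half-file reference: instead of copying the parent's
 * HFile at split time, a daughter records the parent file, the split key, and which
 * half of the key range it owns, and filters reads accordingly until a compaction
 * rewrites the data into the daughter's own store files.
 */
record HalfFileReference(String parentHFile, byte[] splitKey, Half half) {
  boolean ownsRow(byte[] row) {
    int cmp = java.util.Arrays.compare(row, splitKey);
    return half == Half.TOP ? cmp >= 0 : cmp < 0;
  }
}

// e.g. the daughter that starts at row0062 would conceptually hold:
// new HalfFileReference(".../7f8de01.../info/05cf5728c2124d9ebf8261c2df7d9789",
//     "row0062".getBytes(java.nio.charset.StandardCharsets.UTF_8), Half.TOP)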
2024-12-09T11:25:49,821 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => a6e33672d96dcec117787729bb2f311f, NAME => 'TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-09T11:25:49,821 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling a6e33672d96dcec117787729bb2f311f 2024-12-09T11:25:49,821 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:25:49,821 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for a6e33672d96dcec117787729bb2f311f 2024-12-09T11:25:49,821 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for a6e33672d96dcec117787729bb2f311f 2024-12-09T11:25:49,821 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=51c71c438277672a7babb8bcc8349f6a, regionState=OPEN, openSeqNum=127, regionLocation=2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:49,821 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting 05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733743534803 2024-12-09T11:25:49,822 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-e3a546bd3831452080a7e9062b233da8, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733743549032 2024-12-09T11:25:49,822 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-53706db6c1984772b1cba29c018879ef, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733743549056 2024-12-09T11:25:49,822 INFO [StoreOpener-a6e33672d96dcec117787729bb2f311f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a6e33672d96dcec117787729bb2f311f 2024-12-09T11:25:49,823 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-eb6908f0809547dabbc5d787ea1333f9, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733743549086 2024-12-09T11:25:49,823 INFO [StoreOpener-a6e33672d96dcec117787729bb2f311f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a6e33672d96dcec117787729bb2f311f columnFamilyName info 2024-12-09T11:25:49,823 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-09T11:25:49,823 DEBUG [StoreOpener-a6e33672d96dcec117787729bb2f311f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:25:49,823 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-12-09T11:25:49,824 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-12-09T11:25:49,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 because future has completed 2024-12-09T11:25:49,829 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-12-09T11:25:49,829 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure 51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 in 197 msec 2024-12-09T11:25:49,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=51c71c438277672a7babb8bcc8349f6a, ASSIGN in 359 msec 2024-12-09T11:25:49,842 DEBUG [StoreOpener-a6e33672d96dcec117787729bb2f311f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f/info/05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b->hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/05cf5728c2124d9ebf8261c2df7d9789-bottom 2024-12-09T11:25:49,843 INFO [StoreOpener-a6e33672d96dcec117787729bb2f311f-1 {}] regionserver.HStore(327): Store=a6e33672d96dcec117787729bb2f311f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:25:49,843 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for a6e33672d96dcec117787729bb2f311f 2024-12-09T11:25:49,844 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f 2024-12-09T11:25:49,845 DEBUG 
[RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f 2024-12-09T11:25:49,845 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for a6e33672d96dcec117787729bb2f311f 2024-12-09T11:25:49,845 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for a6e33672d96dcec117787729bb2f311f 2024-12-09T11:25:49,847 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for a6e33672d96dcec117787729bb2f311f 2024-12-09T11:25:49,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/.tmp/info/7548e497349c41538be7da45ed6a790c is 193, key is TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a./info:regioninfo/1733743549821/Put/seqid=0 2024-12-09T11:25:49,850 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened a6e33672d96dcec117787729bb2f311f; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=872897, jitterRate=0.10994668304920197}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T11:25:49,850 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a6e33672d96dcec117787729bb2f311f 2024-12-09T11:25:49,850 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for a6e33672d96dcec117787729bb2f311f: Running coprocessor pre-open hook at 1733743549821Writing region info on filesystem at 1733743549821Initializing all the Stores at 1733743549822 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743549822Cleaning up temporary data from old regions at 1733743549845 (+23 ms)Running coprocessor post-open hooks at 1733743549850 (+5 ms)Region opened successfully at 1733743549850 2024-12-09T11:25:49,851 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f., pid=13, masterSystemTime=1733743549780 2024-12-09T11:25:49,851 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store a6e33672d96dcec117787729bb2f311f:info, priority=-2147483648, current under compaction store size is 2 2024-12-09T11:25:49,851 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T11:25:49,851 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking
2024-12-09T11:25:49,852 INFO [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f.
2024-12-09T11:25:49,852 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HStore(1541): a6e33672d96dcec117787729bb2f311f/info is initiating minor compaction (all files)
2024-12-09T11:25:49,853 INFO [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a6e33672d96dcec117787729bb2f311f/info in TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f.
2024-12-09T11:25:49,853 INFO [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f/info/05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b->hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/05cf5728c2124d9ebf8261c2df7d9789-bottom] into tmpdir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f/.tmp, totalSize=73.6 K
2024-12-09T11:25:49,853 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] compactions.Compactor(225): Compacting 05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733743534803
2024-12-09T11:25:49,854 DEBUG [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f.
2024-12-09T11:25:49,854 INFO [RS_OPEN_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f.
2024-12-09T11:25:49,855 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=a6e33672d96dcec117787729bb2f311f, regionState=OPEN, openSeqNum=127, regionLocation=2dff3a36d44f,38937,1733743523844
2024-12-09T11:25:49,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure a6e33672d96dcec117787729bb2f311f, server=2dff3a36d44f,38937,1733743523844 because future has completed
2024-12-09T11:25:49,863 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c71c438277672a7babb8bcc8349f6a#info#compaction#66 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T11:25:49,864 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/2eb1b52704aa4fc79333d626550cf825 is 1080, key is row0062/info:/1733743547018/Put/seqid=0
2024-12-09T11:25:49,865 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11
2024-12-09T11:25:49,865 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure a6e33672d96dcec117787729bb2f311f, server=2dff3a36d44f,38937,1733743523844 in 235 msec
2024-12-09T11:25:49,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741851_1027 (size=9882)
2024-12-09T11:25:49,868 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7
2024-12-09T11:25:49,868 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a6e33672d96dcec117787729bb2f311f, ASSIGN in 395 msec
2024-12-09T11:25:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741851_1027 (size=9882)
2024-12-09T11:25:49,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7f8de01bf71778d802e7a54d45d7719b, daughterA=a6e33672d96dcec117787729bb2f311f, daughterB=51c71c438277672a7babb8bcc8349f6a in 763 msec
2024-12-09T11:25:49,870 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7f8de01bf71778d802e7a54d45d7719b, daughterA=e17878d45783a2e91ae18aeddcb47834, daughterB=5449bde4413bfae53ddcc6e86432396d
2024-12-09T11:25:49,871 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7f8de01bf71778d802e7a54d45d7719b, daughterA=e17878d45783a2e91ae18aeddcb47834, daughterB=5449bde4413bfae53ddcc6e86432396d
2024-12-09T11:25:49,871 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7f8de01bf71778d802e7a54d45d7719b, daughterA=e17878d45783a2e91ae18aeddcb47834, daughterB=5449bde4413bfae53ddcc6e86432396d
2024-12-09T11:25:49,872 INFO [PEWorker-2 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => 7f8de01bf71778d802e7a54d45d7719b, NAME => 'TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT
2024-12-09T11:25:49,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7f8de01bf71778d802e7a54d45d7719b, daughterA=e17878d45783a2e91ae18aeddcb47834, daughterB=5449bde4413bfae53ddcc6e86432396d in 758 msec
2024-12-09T11:25:49,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/.tmp/info/7548e497349c41538be7da45ed6a790c
2024-12-09T11:25:49,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741852_1028 (size=43081)
2024-12-09T11:25:49,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741852_1028 (size=43081)
2024-12-09T11:25:49,891 INFO [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6e33672d96dcec117787729bb2f311f#info#compaction#67 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T11:25:49,892 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f/.tmp/info/c1c213f1559f4c81a13f5a8513a4366e is 1080, key is row0001/info:/1733743534803/Put/seqid=0
2024-12-09T11:25:49,894 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/2eb1b52704aa4fc79333d626550cf825 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/2eb1b52704aa4fc79333d626550cf825
2024-12-09T11:25:49,901 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 51c71c438277672a7babb8bcc8349f6a/info of 51c71c438277672a7babb8bcc8349f6a into 2eb1b52704aa4fc79333d626550cf825(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T11:25:49,901 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51c71c438277672a7babb8bcc8349f6a:
2024-12-09T11:25:49,901 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., storeName=51c71c438277672a7babb8bcc8349f6a/info, priority=12, startTime=1733743549818; duration=0sec
2024-12-09T11:25:49,901 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T11:25:49,901 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c71c438277672a7babb8bcc8349f6a:info
2024-12-09T11:25:49,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741853_1029 (size=70862)
2024-12-09T11:25:49,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741853_1029 (size=70862)
2024-12-09T11:25:49,908 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/.tmp/ns/0f5ef0aab4b04fb580ce6f6e2b5bb7ea is 43, key is default/ns:d/1733743524668/Put/seqid=0
2024-12-09T11:25:49,910 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f/.tmp/info/c1c213f1559f4c81a13f5a8513a4366e as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f/info/c1c213f1559f4c81a13f5a8513a4366e
2024-12-09T11:25:49,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741854_1030 (size=5153)
2024-12-09T11:25:49,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741854_1030 (size=5153)
2024-12-09T11:25:49,914 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/.tmp/ns/0f5ef0aab4b04fb580ce6f6e2b5bb7ea
2024-12-09T11:25:49,915 INFO [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in a6e33672d96dcec117787729bb2f311f/info of a6e33672d96dcec117787729bb2f311f into c1c213f1559f4c81a13f5a8513a4366e(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T11:25:49,915 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a6e33672d96dcec117787729bb2f311f:
2024-12-09T11:25:49,916 INFO [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f., storeName=a6e33672d96dcec117787729bb2f311f/info, priority=15, startTime=1733743549851; duration=0sec
2024-12-09T11:25:49,916 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T11:25:49,916 DEBUG [RS:0;2dff3a36d44f:38937-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6e33672d96dcec117787729bb2f311f:info
2024-12-09T11:25:49,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/.tmp/table/92a734414a4a4670a0b6922ddbf322a2 is 65, key is TestLogRolling-testLogRolling/table:state/1733743525154/Put/seqid=0
2024-12-09T11:25:49,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741855_1031 (size=5340)
2024-12-09T11:25:49,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741855_1031 (size=5340)
2024-12-09T11:25:49,937 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/.tmp/table/92a734414a4a4670a0b6922ddbf322a2
2024-12-09T11:25:49,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/.tmp/info/7548e497349c41538be7da45ed6a790c as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/info/7548e497349c41538be7da45ed6a790c
2024-12-09T11:25:49,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/info/7548e497349c41538be7da45ed6a790c, entries=30, sequenceid=17, filesize=9.7 K
2024-12-09T11:25:49,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/.tmp/ns/0f5ef0aab4b04fb580ce6f6e2b5bb7ea as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/ns/0f5ef0aab4b04fb580ce6f6e2b5bb7ea
2024-12-09T11:25:49,952 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/ns/0f5ef0aab4b04fb580ce6f6e2b5bb7ea, entries=2, sequenceid=17, filesize=5.0 K
2024-12-09T11:25:49,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/.tmp/table/92a734414a4a4670a0b6922ddbf322a2 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/table/92a734414a4a4670a0b6922ddbf322a2 2024-12-09T11:25:49,959 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/table/92a734414a4a4670a0b6922ddbf322a2, entries=2, sequenceid=17, filesize=5.2 K 2024-12-09T11:25:49,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 137ms, sequenceid=17, compaction requested=false 2024-12-09T11:25:49,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T11:25:50,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:50,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:51,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:59828 deadline: 1733743561094, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. is not online on 2dff3a36d44f,38937,1733743523844 2024-12-09T11:25:51,095 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., hostname=2dff3a36d44f,38937,1733743523844, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., hostname=2dff3a36d44f,38937,1733743523844, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. 
is not online on 2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:25:51,095 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., hostname=2dff3a36d44f,38937,1733743523844, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b. is not online on 2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:25:51,095 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733743524772.7f8de01bf71778d802e7a54d45d7719b., hostname=2dff3a36d44f,38937,1733743523844, seqNum=2 from cache 2024-12-09T11:25:51,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:51,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:52,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:52,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:53,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:25:53,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:53,770 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:25:54,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:54,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:54,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,359 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,866 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T11:25:54,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,869 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,869 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,869 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:54,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:25:55,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:55,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:25:56,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-09T11:25:56,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-09T11:25:57,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
2024-12-09T11:25:57,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
2024-12-09T11:25:58,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
2024-12-09T11:25:58,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
2024-12-09T11:25:59,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
2024-12-09T11:25:59,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
2024-12-09T11:26:00,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
2024-12-09T11:26:00,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:01,212 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127] 2024-12-09T11:26:01,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:26:01,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51c71c438277672a7babb8bcc8349f6a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T11:26:01,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/ffc5b753033a4f1ab6fc6c31c01ad677 is 1080, key is row0097/info:/1733743561213/Put/seqid=0 2024-12-09T11:26:01,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741856_1032 (size=12516) 2024-12-09T11:26:01,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741856_1032 (size=12516) 2024-12-09T11:26:01,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T11:26:01,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:59828 deadline: 1733743571268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 2024-12-09T11:26:01,269 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:26:01,269 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:26:01,269 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127 because the exception is null or not the one we care about 2024-12-09T11:26:01,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:01,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:01,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/ffc5b753033a4f1ab6fc6c31c01ad677 2024-12-09T11:26:01,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/ffc5b753033a4f1ab6fc6c31c01ad677 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ffc5b753033a4f1ab6fc6c31c01ad677 2024-12-09T11:26:01,645 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ffc5b753033a4f1ab6fc6c31c01ad677, entries=7, sequenceid=137, filesize=12.2 K 2024-12-09T11:26:01,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 51c71c438277672a7babb8bcc8349f6a in 424ms, sequenceid=137, compaction requested=false 2024-12-09T11:26:01,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:02,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
2024-12-09T11:26:02,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
2024-12-09T11:26:03,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
2024-12-09T11:26:03,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
2024-12-09T11:26:04,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
2024-12-09T11:26:04,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
2024-12-09T11:26:05,237 INFO [master/2dff3a36d44f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-09T11:26:05,237 INFO [master/2dff3a36d44f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-09T11:26:05,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
2024-12-09T11:26:05,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
2024-12-09T11:26:06,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
2024-12-09T11:26:06,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
2024-12-09T11:26:07,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
2024-12-09T11:26:07,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
2024-12-09T11:26:08,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
2024-12-09T11:26:08,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
2024-12-09T11:26:09,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
2024-12-09T11:26:09,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
2024-12-09T11:26:09,635 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375
2024-12-09T11:26:10,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:10,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:26:11,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51c71c438277672a7babb8bcc8349f6a 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-09T11:26:11,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/a857b7067ff14a5d9393a7b270791c88 is 1080, key is row0104/info:/1733743561224/Put/seqid=0 2024-12-09T11:26:11,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741857_1033 (size=29784) 2024-12-09T11:26:11,315 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/a857b7067ff14a5d9393a7b270791c88 2024-12-09T11:26:11,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741857_1033 (size=29784) 2024-12-09T11:26:11,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/a857b7067ff14a5d9393a7b270791c88 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/a857b7067ff14a5d9393a7b270791c88 2024-12-09T11:26:11,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:11,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:11,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/a857b7067ff14a5d9393a7b270791c88, entries=23, sequenceid=163, filesize=29.1 K 2024-12-09T11:26:11,346 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 51c71c438277672a7babb8bcc8349f6a in 49ms, sequenceid=163, compaction requested=true 2024-12-09T11:26:11,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:11,346 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c71c438277672a7babb8bcc8349f6a:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T11:26:11,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:26:11,347 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T11:26:11,348 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85381 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T11:26:11,348 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1541): 51c71c438277672a7babb8bcc8349f6a/info is initiating minor compaction (all files) 2024-12-09T11:26:11,348 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 51c71c438277672a7babb8bcc8349f6a/info in TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 
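The WARN entries above recur roughly once per second (11:26:07, 11:26:08, 11:26:09, ...): Close-WAL-Writer-0 keeps retrying lease recovery on the two WAL files under hdfs://localhost:42671, and every attempt fails because the FileSystem being probed has already been shut down, so DFSClient.checkOpen throws IOException("Filesystem closed") and the reflective call surfaces it as an InvocationTargetException. The snippet below is an illustrative sketch only, assuming a DistributedFileSystem handle; the class and the probeFileClosed helper are hypothetical names, mirroring the call chain in the trace (RecoverLeaseFSUtils.isFileClosed -> Method.invoke -> DistributedFileSystem.isFileClosed) rather than reproducing HBase's actual RecoverLeaseFSUtils code.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class LeaseProbeSketch {
      // Hypothetical helper: asks the filesystem, via reflection, whether a WAL file
      // is already closed, as the stack trace above does through Method.invoke.
      static boolean probeFileClosed(FileSystem fs, Path wal) {
        try {
          Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) isFileClosed.invoke(fs, wal);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // no such probe on this filesystem; caller falls back to plain retries
        } catch (InvocationTargetException e) {
          // The case logged above: the underlying DFSClient is already closed, so the
          // target method throws IOException("Filesystem closed") and the probe fails.
          return false;
        }
      }
    }

Because the probe can never succeed against a closed client, each retry logs the same failed invocation again, which is why the identical trace repeats for both WAL paths at one-second intervals.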
2024-12-09T11:26:11,348 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/2eb1b52704aa4fc79333d626550cf825, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ffc5b753033a4f1ab6fc6c31c01ad677, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/a857b7067ff14a5d9393a7b270791c88] into tmpdir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp, totalSize=83.4 K 2024-12-09T11:26:11,348 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2eb1b52704aa4fc79333d626550cf825, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733743547018 2024-12-09T11:26:11,349 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting ffc5b753033a4f1ab6fc6c31c01ad677, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733743561213 2024-12-09T11:26:11,349 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting a857b7067ff14a5d9393a7b270791c88, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733743561224 2024-12-09T11:26:11,366 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c71c438277672a7babb8bcc8349f6a#info#compaction#72 average throughput is 66.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:26:11,367 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/a5f299f2e049469ababa18227c165dcd is 1080, key is row0062/info:/1733743547018/Put/seqid=0 2024-12-09T11:26:11,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741858_1034 (size=75664) 2024-12-09T11:26:11,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741858_1034 (size=75664) 2024-12-09T11:26:11,389 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/a5f299f2e049469ababa18227c165dcd as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/a5f299f2e049469ababa18227c165dcd 2024-12-09T11:26:11,395 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 51c71c438277672a7babb8bcc8349f6a/info of 51c71c438277672a7babb8bcc8349f6a into a5f299f2e049469ababa18227c165dcd(size=73.9 K), total size for store is 73.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T11:26:11,395 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:11,395 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., storeName=51c71c438277672a7babb8bcc8349f6a/info, priority=13, startTime=1733743571346; duration=0sec 2024-12-09T11:26:11,395 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:26:11,395 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c71c438277672a7babb8bcc8349f6a:info 2024-12-09T11:26:12,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:12,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:13,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:26:13,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51c71c438277672a7babb8bcc8349f6a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T11:26:13,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/5478a41684664ac7b2ba624d3f51df0e is 1080, key is row0127/info:/1733743571298/Put/seqid=0 2024-12-09T11:26:13,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741859_1035 (size=12516) 2024-12-09T11:26:13,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741859_1035 (size=12516) 2024-12-09T11:26:13,332 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/5478a41684664ac7b2ba624d3f51df0e 2024-12-09T11:26:13,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:13,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:26:13,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/5478a41684664ac7b2ba624d3f51df0e as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/5478a41684664ac7b2ba624d3f51df0e 2024-12-09T11:26:13,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/5478a41684664ac7b2ba624d3f51df0e, entries=7, sequenceid=174, filesize=12.2 K 2024-12-09T11:26:13,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 51c71c438277672a7babb8bcc8349f6a in 45ms, sequenceid=174, compaction requested=false 2024-12-09T11:26:13,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:13,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:26:13,361 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51c71c438277672a7babb8bcc8349f6a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-09T11:26:13,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/54c285ee652740349cbcda7da02a4ba7 is 1080, key is row0134/info:/1733743573314/Put/seqid=0 2024-12-09T11:26:13,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741860_1036 (size=17906) 2024-12-09T11:26:13,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741860_1036 (size=17906) 2024-12-09T11:26:13,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/54c285ee652740349cbcda7da02a4ba7 2024-12-09T11:26:13,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/54c285ee652740349cbcda7da02a4ba7 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/54c285ee652740349cbcda7da02a4ba7 2024-12-09T11:26:13,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/54c285ee652740349cbcda7da02a4ba7, entries=12, sequenceid=189, filesize=17.5 K 
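Between 11:26:11 and 11:26:13 the same cycle runs on region 51c71c438277672a7babb8bcc8349f6a: a memstore flush adds a new info HFile, the store then holds three eligible files, and the short-compactions thread rewrites them into one file (85381 and then 106086 bytes of input, per the "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries). The sketch below is a minimal, self-contained illustration of that kind of size-ratio check; the class and method names and the 1.2 ratio are assumptions for illustration, not the actual ExploringCompactionPolicy code, and the sizes are only approximations of the 42.1 K, 12.2 K and 29.1 K files reported above.

    import java.util.List;

    final class CompactionRatioSketch {
      // A candidate set passes if no single file dwarfs the rest: each file must be
      // no larger than ratio * (combined size of the other candidates).
      static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
          if (size > ratio * (total - size)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Roughly the 11:26:11 selection: three info HFiles of ~42.1 K, ~12.2 K, ~29.1 K.
        List<Long> candidates = List.of(43_100L, 12_500L, 29_800L);
        System.out.println(filesInRatio(candidates, 1.2)); // prints true: all three are compacted together
      }
    }

The 11:26:11 pass rewrote its three inputs into a single 73.9 K file, after which the under-compaction mark for 51c71c438277672a7babb8bcc8349f6a:info was cleared and the next flush started the cycle again.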
2024-12-09T11:26:13,417 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=15.76 KB/16140 for 51c71c438277672a7babb8bcc8349f6a in 56ms, sequenceid=189, compaction requested=true 2024-12-09T11:26:13,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:13,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c71c438277672a7babb8bcc8349f6a:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T11:26:13,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:26:13,417 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T11:26:13,418 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 106086 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T11:26:13,418 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1541): 51c71c438277672a7babb8bcc8349f6a/info is initiating minor compaction (all files) 2024-12-09T11:26:13,418 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 51c71c438277672a7babb8bcc8349f6a/info in TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 2024-12-09T11:26:13,418 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/a5f299f2e049469ababa18227c165dcd, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/5478a41684664ac7b2ba624d3f51df0e, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/54c285ee652740349cbcda7da02a4ba7] into tmpdir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp, totalSize=103.6 K 2024-12-09T11:26:13,419 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting a5f299f2e049469ababa18227c165dcd, keycount=65, bloomtype=ROW, size=73.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733743547018 2024-12-09T11:26:13,422 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5478a41684664ac7b2ba624d3f51df0e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733743571298 2024-12-09T11:26:13,422 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting 54c285ee652740349cbcda7da02a4ba7, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1733743573314 2024-12-09T11:26:13,436 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 51c71c438277672a7babb8bcc8349f6a#info#compaction#75 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:26:13,437 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/ff4a4d16b50940628eb2323349260646 is 1080, key is row0062/info:/1733743547018/Put/seqid=0 2024-12-09T11:26:13,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741861_1037 (size=96252) 2024-12-09T11:26:13,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741861_1037 (size=96252) 2024-12-09T11:26:13,460 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/ff4a4d16b50940628eb2323349260646 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ff4a4d16b50940628eb2323349260646 2024-12-09T11:26:13,471 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 51c71c438277672a7babb8bcc8349f6a/info of 51c71c438277672a7babb8bcc8349f6a into ff4a4d16b50940628eb2323349260646(size=94.0 K), total size for store is 94.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T11:26:13,472 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:13,472 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., storeName=51c71c438277672a7babb8bcc8349f6a/info, priority=13, startTime=1733743573417; duration=0sec 2024-12-09T11:26:13,472 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:26:13,472 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c71c438277672a7babb8bcc8349f6a:info 2024-12-09T11:26:14,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:14,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:15,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:15,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:15,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:26:15,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51c71c438277672a7babb8bcc8349f6a 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-09T11:26:15,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/ecc19f0561e04a5882c08b1a66d17fb1 is 1080, key is row0146/info:/1733743573366/Put/seqid=0 2024-12-09T11:26:15,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741862_1038 (size=22238) 2024-12-09T11:26:15,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741862_1038 (size=22238) 2024-12-09T11:26:15,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/ecc19f0561e04a5882c08b1a66d17fb1 2024-12-09T11:26:15,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/ecc19f0561e04a5882c08b1a66d17fb1 as 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ecc19f0561e04a5882c08b1a66d17fb1 2024-12-09T11:26:15,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ecc19f0561e04a5882c08b1a66d17fb1, entries=16, sequenceid=209, filesize=21.7 K 2024-12-09T11:26:15,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for 51c71c438277672a7babb8bcc8349f6a in 27ms, sequenceid=209, compaction requested=false 2024-12-09T11:26:15,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:15,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:26:15,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51c71c438277672a7babb8bcc8349f6a 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-09T11:26:15,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/61b10045e79a4f6199a89635830b4936 is 1080, key is row0162/info:/1733743575402/Put/seqid=0 2024-12-09T11:26:15,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741863_1039 (size=20078) 2024-12-09T11:26:15,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741863_1039 (size=20078) 2024-12-09T11:26:15,440 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=226 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/61b10045e79a4f6199a89635830b4936 2024-12-09T11:26:15,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/61b10045e79a4f6199a89635830b4936 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/61b10045e79a4f6199a89635830b4936 2024-12-09T11:26:15,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/61b10045e79a4f6199a89635830b4936, entries=14, sequenceid=226, filesize=19.6 K 2024-12-09T11:26:15,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for 51c71c438277672a7babb8bcc8349f6a in 25ms, sequenceid=226, compaction requested=true 2024-12-09T11:26:15,453 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:15,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c71c438277672a7babb8bcc8349f6a:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T11:26:15,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:26:15,453 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T11:26:15,454 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 138568 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T11:26:15,454 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1541): 51c71c438277672a7babb8bcc8349f6a/info is initiating minor compaction (all files) 2024-12-09T11:26:15,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:26:15,454 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 51c71c438277672a7babb8bcc8349f6a/info in TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 2024-12-09T11:26:15,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51c71c438277672a7babb8bcc8349f6a 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-09T11:26:15,455 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ff4a4d16b50940628eb2323349260646, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ecc19f0561e04a5882c08b1a66d17fb1, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/61b10045e79a4f6199a89635830b4936] into tmpdir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp, totalSize=135.3 K 2024-12-09T11:26:15,455 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting ff4a4d16b50940628eb2323349260646, keycount=84, bloomtype=ROW, size=94.0 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1733743547018 2024-12-09T11:26:15,456 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting ecc19f0561e04a5882c08b1a66d17fb1, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733743573366 2024-12-09T11:26:15,456 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting 61b10045e79a4f6199a89635830b4936, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733743575402 2024-12-09T11:26:15,459 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/0e5e69c913e442e494401ebc17a7ef2d is 1080, key is row0176/info:/1733743575429/Put/seqid=0 2024-12-09T11:26:15,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741864_1040 (size=22238) 2024-12-09T11:26:15,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741864_1040 (size=22238) 2024-12-09T11:26:15,468 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/0e5e69c913e442e494401ebc17a7ef2d 2024-12-09T11:26:15,473 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c71c438277672a7babb8bcc8349f6a#info#compaction#79 average throughput is 58.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:26:15,474 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/91a9972bd2e04dfc8c1316c5b4db8013 is 1080, key is row0062/info:/1733743547018/Put/seqid=0 2024-12-09T11:26:15,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/0e5e69c913e442e494401ebc17a7ef2d as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/0e5e69c913e442e494401ebc17a7ef2d 2024-12-09T11:26:15,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/0e5e69c913e442e494401ebc17a7ef2d, entries=16, sequenceid=245, filesize=21.7 K 2024-12-09T11:26:15,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=1.05 KB/1076 for 51c71c438277672a7babb8bcc8349f6a in 25ms, sequenceid=245, compaction requested=false 2024-12-09T11:26:15,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:15,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741865_1041 (size=128830) 2024-12-09T11:26:15,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741865_1041 (size=128830) 2024-12-09T11:26:15,496 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/91a9972bd2e04dfc8c1316c5b4db8013 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/91a9972bd2e04dfc8c1316c5b4db8013 2024-12-09T11:26:15,502 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 51c71c438277672a7babb8bcc8349f6a/info of 51c71c438277672a7babb8bcc8349f6a into 91a9972bd2e04dfc8c1316c5b4db8013(size=125.8 K), total size for store is 147.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T11:26:15,502 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:15,502 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., storeName=51c71c438277672a7babb8bcc8349f6a/info, priority=13, startTime=1733743575453; duration=0sec 2024-12-09T11:26:15,502 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:26:15,502 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c71c438277672a7babb8bcc8349f6a:info 2024-12-09T11:26:16,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:16,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:17,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:17,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:17,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:26:17,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51c71c438277672a7babb8bcc8349f6a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T11:26:17,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/e3912a3587d54196b4838e4042b62346 is 1080, key is row0192/info:/1733743575456/Put/seqid=0 2024-12-09T11:26:17,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741866_1042 (size=12516) 2024-12-09T11:26:17,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741866_1042 (size=12516) 2024-12-09T11:26:17,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38937 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T11:26:17,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38937 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:59828 deadline: 1733743587510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 2024-12-09T11:26:17,511 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:26:17,511 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:26:17,511 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127 because the exception is null or not the one we care about 2024-12-09T11:26:17,908 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/e3912a3587d54196b4838e4042b62346 2024-12-09T11:26:17,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/e3912a3587d54196b4838e4042b62346 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/e3912a3587d54196b4838e4042b62346 2024-12-09T11:26:17,917 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/e3912a3587d54196b4838e4042b62346, entries=7, sequenceid=256, filesize=12.2 K 2024-12-09T11:26:17,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 51c71c438277672a7babb8bcc8349f6a in 449ms, sequenceid=256, compaction requested=true 2024-12-09T11:26:17,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:17,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c71c438277672a7babb8bcc8349f6a:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T11:26:17,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:26:17,919 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T11:26:17,920 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 163584 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T11:26:17,920 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1541): 51c71c438277672a7babb8bcc8349f6a/info is initiating minor compaction (all files) 2024-12-09T11:26:17,920 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
51c71c438277672a7babb8bcc8349f6a/info in TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 2024-12-09T11:26:17,920 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/91a9972bd2e04dfc8c1316c5b4db8013, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/0e5e69c913e442e494401ebc17a7ef2d, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/e3912a3587d54196b4838e4042b62346] into tmpdir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp, totalSize=159.8 K 2024-12-09T11:26:17,920 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting 91a9972bd2e04dfc8c1316c5b4db8013, keycount=114, bloomtype=ROW, size=125.8 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733743547018 2024-12-09T11:26:17,920 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0e5e69c913e442e494401ebc17a7ef2d, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733743575429 2024-12-09T11:26:17,921 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting e3912a3587d54196b4838e4042b62346, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733743575456 2024-12-09T11:26:17,930 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c71c438277672a7babb8bcc8349f6a#info#compaction#81 average throughput is 70.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:26:17,931 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/53d92555334b43398fb50ac1f41043a6 is 1080, key is row0062/info:/1733743547018/Put/seqid=0 2024-12-09T11:26:17,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741867_1043 (size=153799) 2024-12-09T11:26:17,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741867_1043 (size=153799) 2024-12-09T11:26:17,939 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/53d92555334b43398fb50ac1f41043a6 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/53d92555334b43398fb50ac1f41043a6 2024-12-09T11:26:17,945 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 51c71c438277672a7babb8bcc8349f6a/info of 51c71c438277672a7babb8bcc8349f6a into 53d92555334b43398fb50ac1f41043a6(size=150.2 K), total size for store is 150.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T11:26:17,945 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:17,945 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., storeName=51c71c438277672a7babb8bcc8349f6a/info, priority=13, startTime=1733743577919; duration=0sec 2024-12-09T11:26:17,945 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:26:17,945 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c71c438277672a7babb8bcc8349f6a:info 2024-12-09T11:26:18,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:18,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:19,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:19,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:20,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:20,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:21,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:21,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:22,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:22,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:23,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:23,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:23,770 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T11:26:24,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:24,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:25,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:25,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:26,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:26,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:26:26,687 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=67, reuseRatio=88.16% 2024-12-09T11:26:26,687 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-09T11:26:27,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:27,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:27,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:26:27,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51c71c438277672a7babb8bcc8349f6a 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-09T11:26:27,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/b643dc0293b349bab02e724bce5e8c84 is 1080, key is row0199/info:/1733743577471/Put/seqid=0 2024-12-09T11:26:27,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741868_1044 (size=29807) 2024-12-09T11:26:27,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741868_1044 (size=29807) 2024-12-09T11:26:27,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/b643dc0293b349bab02e724bce5e8c84 2024-12-09T11:26:27,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/b643dc0293b349bab02e724bce5e8c84 as 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/b643dc0293b349bab02e724bce5e8c84 2024-12-09T11:26:27,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/b643dc0293b349bab02e724bce5e8c84, entries=23, sequenceid=283, filesize=29.1 K 2024-12-09T11:26:27,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for 51c71c438277672a7babb8bcc8349f6a in 20ms, sequenceid=283, compaction requested=false 2024-12-09T11:26:27,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:28,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:26:28,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:29,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:29,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:26:29,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:26:29,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51c71c438277672a7babb8bcc8349f6a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T11:26:29,604 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/bf0c2fba05464e8b9a2aa5999ae8bf3f is 1080, key is row0222/info:/1733743587588/Put/seqid=0 2024-12-09T11:26:29,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T11:26:29,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:59828 deadline: 1733743599643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 2024-12-09T11:26:29,644 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at 
org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:26:29,644 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=51c71c438277672a7babb8bcc8349f6a, server=2dff3a36d44f,38937,1733743523844 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T11:26:29,644 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., hostname=2dff3a36d44f,38937,1733743523844, seqNum=127 because the exception is null or not the one we care about 2024-12-09T11:26:29,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741869_1045 (size=12523) 2024-12-09T11:26:29,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741869_1045 (size=12523) 2024-12-09T11:26:29,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/bf0c2fba05464e8b9a2aa5999ae8bf3f 2024-12-09T11:26:29,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/bf0c2fba05464e8b9a2aa5999ae8bf3f as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/bf0c2fba05464e8b9a2aa5999ae8bf3f 
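Editor's note: the RegionTooBusyException entries above show the region refusing mutations while its pending memstore is over the blocking limit (32.0 K in this test run) and accepting them again once the flusher catches up. The following is a minimal, hypothetical sketch of that back-pressure guard, not HBase's actual HRegion.checkResources(); the class name, method names, and the hard-coded 32 KB limit are illustrative assumptions only.

```java
// Hypothetical sketch of a memstore back-pressure guard (not HBase's implementation).
// Writes are rejected while accumulated memstore bytes exceed a blocking limit; a
// flush drains the counter and writes are accepted again, as seen in the log above.
import java.util.concurrent.atomic.AtomicLong;

public class MemstoreGuardSketch {

    static class RegionTooBusy extends RuntimeException {
        RegionTooBusy(String msg) { super(msg); }
    }

    private final AtomicLong memstoreSize = new AtomicLong();
    private final long blockingLimitBytes;

    MemstoreGuardSketch(long blockingLimitBytes) {
        this.blockingLimitBytes = blockingLimitBytes;
    }

    /** Mirrors the checkResources() idea: refuse the write while the memstore is over the limit. */
    void put(byte[] row, byte[] value) {
        if (memstoreSize.get() > blockingLimitBytes) {
            throw new RegionTooBusy("Over memstore limit=" + blockingLimitBytes + " bytes");
        }
        memstoreSize.addAndGet(row.length + value.length);
    }

    /** A flush drains the pending bytes, after which writes are accepted again. */
    void flush() {
        memstoreSize.set(0);
    }

    public static void main(String[] args) {
        MemstoreGuardSketch region = new MemstoreGuardSketch(32 * 1024); // 32 KB, as in the log
        byte[] row = new byte[16], value = new byte[1024];
        int accepted = 0;
        try {
            for (int i = 0; i < 100; i++) { region.put(row, value); accepted++; }
        } catch (RegionTooBusy e) {
            System.out.println("Rejected after " + accepted + " puts: " + e.getMessage());
        }
        region.flush();
        region.put(row, value); // accepted again once the flush has drained the memstore
        System.out.println("Write accepted after flush");
    }
}
```

Running the sketch rejects the write once the accumulated bytes pass the limit and accepts writes again after flush(), which is the same back-pressure pattern the retrying client sees in the log entries above.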
2024-12-09T11:26:29,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/bf0c2fba05464e8b9a2aa5999ae8bf3f, entries=7, sequenceid=293, filesize=12.2 K 2024-12-09T11:26:29,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 51c71c438277672a7babb8bcc8349f6a in 80ms, sequenceid=293, compaction requested=true 2024-12-09T11:26:29,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:29,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51c71c438277672a7babb8bcc8349f6a:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T11:26:29,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:26:29,680 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T11:26:29,681 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 196129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T11:26:29,681 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1541): 51c71c438277672a7babb8bcc8349f6a/info is initiating minor compaction (all files) 2024-12-09T11:26:29,681 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 51c71c438277672a7babb8bcc8349f6a/info in TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 
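Editor's note: the compaction lines that follow show three store files (150.2 K, 29.1 K and 12.2 K, 196129 bytes in total) being selected and rewritten into a single 181.9 K file. Below is a simplified, hypothetical ratio-based selection in the spirit of the "exploring" policy named in the log, not the real ExploringCompactionPolicy; the ratio value, the fallback rule, and all class/method names are assumptions, and only the file sizes are taken from the log.

```java
// Hypothetical sketch of ratio-based compaction file selection. Among contiguous
// windows of store files, keep windows where every file is at most `ratio` times the
// combined size of the other files in the window; prefer more files, then fewer bytes.
import java.util.ArrayList;
import java.util.List;

public class CompactionSelectionSketch {

    /** True if every file in the window is <= ratio * (window total - that file). */
    static boolean withinRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > ratio * (total - size)) return false;
        }
        return true;
    }

    static List<Long> select(List<Long> files, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestSize = Long.MAX_VALUE;
        for (int start = 0; start < files.size(); start++) {
            for (int end = start + minFiles; end <= Math.min(files.size(), start + maxFiles); end++) {
                List<Long> window = files.subList(start, end);
                if (!withinRatio(window, ratio)) continue;
                long size = window.stream().mapToLong(Long::longValue).sum();
                if (window.size() > best.size() || (window.size() == best.size() && size < bestSize)) {
                    best = new ArrayList<>(window);
                    bestSize = size;
                }
            }
        }
        // Simplified fallback (an assumption of this sketch): if no window satisfies the
        // ratio, compact all eligible files up to maxFiles, i.e. an "all files" minor compaction.
        if (best.isEmpty()) {
            best = new ArrayList<>(files.subList(0, Math.min(files.size(), maxFiles)));
        }
        return best;
    }

    public static void main(String[] args) {
        // Store file sizes (bytes) from the log: 150.2 K, 29.1 K and 12.2 K (total 196129).
        List<Long> files = List.of(153_799L, 29_807L, 12_523L);
        List<Long> chosen = select(files, 2, 10, 1.2);
        System.out.println("Selected " + chosen.size() + " files, total " +
            chosen.stream().mapToLong(Long::longValue).sum() + " bytes: " + chosen);
    }
}
```

With the sizes from the log, no contiguous window satisfies a 1.2 ratio in this sketch, so the fallback compacts all three eligible files, which corresponds to the "minor compaction (all files)" outcome recorded in the entries that follow.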
2024-12-09T11:26:29,681 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/53d92555334b43398fb50ac1f41043a6, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/b643dc0293b349bab02e724bce5e8c84, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/bf0c2fba05464e8b9a2aa5999ae8bf3f] into tmpdir=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp, totalSize=191.5 K 2024-12-09T11:26:29,682 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting 53d92555334b43398fb50ac1f41043a6, keycount=137, bloomtype=ROW, size=150.2 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733743547018 2024-12-09T11:26:29,682 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting b643dc0293b349bab02e724bce5e8c84, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1733743577471 2024-12-09T11:26:29,683 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf0c2fba05464e8b9a2aa5999ae8bf3f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733743587588 2024-12-09T11:26:29,696 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51c71c438277672a7babb8bcc8349f6a#info#compaction#84 average throughput is 57.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T11:26:29,696 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/182a960edd7e426884074241635fa14e is 1080, key is row0062/info:/1733743547018/Put/seqid=0 2024-12-09T11:26:29,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741870_1046 (size=186279) 2024-12-09T11:26:29,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741870_1046 (size=186279) 2024-12-09T11:26:29,707 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/182a960edd7e426884074241635fa14e as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/182a960edd7e426884074241635fa14e 2024-12-09T11:26:29,712 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 51c71c438277672a7babb8bcc8349f6a/info of 51c71c438277672a7babb8bcc8349f6a into 182a960edd7e426884074241635fa14e(size=181.9 K), total size for store is 181.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T11:26:29,712 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:29,712 INFO [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., storeName=51c71c438277672a7babb8bcc8349f6a/info, priority=13, startTime=1733743589680; duration=0sec 2024-12-09T11:26:29,712 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T11:26:29,712 DEBUG [RS:0;2dff3a36d44f:38937-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51c71c438277672a7babb8bcc8349f6a:info 2024-12-09T11:26:30,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:30,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:31,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:31,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:32,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:32,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:33,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:33,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:34,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:34,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:34,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:34,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:34,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:34,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:34,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:34,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:34,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:34,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:34,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:34,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:34,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:34,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:34,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:34,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:34,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:34,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:34,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:34,767 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:35,273 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T11:26:35,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:35,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:35,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:35,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:35,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:35,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T11:26:35,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:35,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:35,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:35,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:35,294 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:35,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:35,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:35,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:35,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:35,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:35,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:35,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T11:26:35,307 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 51c71c438277672a7babb8bcc8349f6a, had cached 0 bytes from a total of 186279 2024-12-09T11:26:35,307 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a6e33672d96dcec117787729bb2f311f, had cached 0 bytes from a total of 70862 2024-12-09T11:26:35,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:35,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:36,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:36,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:37,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:37,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:38,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:38,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:39,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:39,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-09T11:26:39,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38937 {}] regionserver.HRegion(8855): Flush requested on 51c71c438277672a7babb8bcc8349f6a
2024-12-09T11:26:39,717 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 51c71c438277672a7babb8bcc8349f6a 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-09T11:26:39,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/44082603b49d4d7b9d8b769f203085dd is 1080, key is row0229/info:/1733743589600/Put/seqid=0
2024-12-09T11:26:39,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741871_1047 (size=29807)
2024-12-09T11:26:39,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741871_1047 (size=29807)
2024-12-09T11:26:39,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/44082603b49d4d7b9d8b769f203085dd
2024-12-09T11:26:39,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/44082603b49d4d7b9d8b769f203085dd as
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/44082603b49d4d7b9d8b769f203085dd 2024-12-09T11:26:39,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/44082603b49d4d7b9d8b769f203085dd, entries=23, sequenceid=320, filesize=29.1 K 2024-12-09T11:26:39,740 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=5.25 KB/5380 for 51c71c438277672a7babb8bcc8349f6a in 23ms, sequenceid=320, compaction requested=false 2024-12-09T11:26:39,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:40,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:26:40,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:41,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:41,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-09T11:26:41,726 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files
2024-12-09T11:26:41,727 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C38937%2C1733743523844.1733743601726
2024-12-09T11:26:41,761 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:26:41,761 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:26:41,761 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:26:41,761 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:26:41,762 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:26:41,762 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844/2dff3a36d44f%2C38937%2C1733743523844.1733743524244 with entries=307, filesize=306.84 KB; new WAL /user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844/2dff3a36d44f%2C38937%2C1733743523844.1733743601726
2024-12-09T11:26:41,768 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36943:36943),(127.0.0.1/127.0.0.1:36675:36675)]
2024-12-09T11:26:41,768 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844/2dff3a36d44f%2C38937%2C1733743523844.1733743524244 is not closed yet, will try archiving it next time
2024-12-09T11:26:41,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741833_1009 (size=314209)
2024-12-09T11:26:41,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741833_1009 (size=314209)
2024-12-09T11:26:41,772 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 51c71c438277672a7babb8bcc8349f6a 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB
2024-12-09T11:26:41,792 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/8890c0df25d74d959e2b75b6800cfa0f is 1080, key is row0252/info:/1733743599718/Put/seqid=0
2024-12-09T11:26:41,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741873_1049 (size=10357)
2024-12-09T11:26:41,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741873_1049 (size=10357)
2024-12-09T11:26:42,199 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/8890c0df25d74d959e2b75b6800cfa0f
2024-12-09T11:26:42,206 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/.tmp/info/8890c0df25d74d959e2b75b6800cfa0f as
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/8890c0df25d74d959e2b75b6800cfa0f 2024-12-09T11:26:42,213 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/8890c0df25d74d959e2b75b6800cfa0f, entries=5, sequenceid=328, filesize=10.1 K 2024-12-09T11:26:42,215 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 51c71c438277672a7babb8bcc8349f6a in 443ms, sequenceid=328, compaction requested=true 2024-12-09T11:26:42,215 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 51c71c438277672a7babb8bcc8349f6a: 2024-12-09T11:26:42,215 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-12-09T11:26:42,220 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/.tmp/info/14b8e454305947aa88c7651d0f324158 is 186, key is TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f./info:regioninfo/1733743549855/Put/seqid=0 2024-12-09T11:26:42,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741874_1050 (size=6153) 2024-12-09T11:26:42,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741874_1050 (size=6153) 2024-12-09T11:26:42,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:42,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:26:42,632 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/.tmp/info/14b8e454305947aa88c7651d0f324158 2024-12-09T11:26:42,638 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/.tmp/info/14b8e454305947aa88c7651d0f324158 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/info/14b8e454305947aa88c7651d0f324158 2024-12-09T11:26:42,642 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/info/14b8e454305947aa88c7651d0f324158, entries=5, sequenceid=21, filesize=6.0 K 2024-12-09T11:26:42,644 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 428ms, sequenceid=21, compaction requested=false 2024-12-09T11:26:42,644 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T11:26:42,644 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a6e33672d96dcec117787729bb2f311f: 2024-12-09T11:26:42,644 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C38937%2C1733743523844.1733743602644 2024-12-09T11:26:42,654 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:42,654 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:42,654 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:42,654 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:42,654 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:42,654 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844/2dff3a36d44f%2C38937%2C1733743523844.1733743601726 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844/2dff3a36d44f%2C38937%2C1733743523844.1733743602644 2024-12-09T11:26:42,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741872_1048 (size=731) 2024-12-09T11:26:42,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741872_1048 (size=731) 2024-12-09T11:26:42,667 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36675:36675),(127.0.0.1/127.0.0.1:36943:36943)] 2024-12-09T11:26:42,667 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844/2dff3a36d44f%2C38937%2C1733743523844.1733743601726 is not closed yet, will try archiving it next time 2024-12-09T11:26:42,672 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T11:26:42,674 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844/2dff3a36d44f%2C38937%2C1733743523844.1733743524244 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/oldWALs/2dff3a36d44f%2C38937%2C1733743523844.1733743524244 2024-12-09T11:26:43,064 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/WALs/2dff3a36d44f,38937,1733743523844/2dff3a36d44f%2C38937%2C1733743523844.1733743601726 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/oldWALs/2dff3a36d44f%2C38937%2C1733743523844.1733743601726 2024-12-09T11:26:43,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T11:26:43,074 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T11:26:43,074 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:26:43,074 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:26:43,074 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:26:43,074 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T11:26:43,074 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T11:26:43,074 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=690556816, stopped=false 2024-12-09T11:26:43,074 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2dff3a36d44f,43325,1733743523795 2024-12-09T11:26:43,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:26:43,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:26:43,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:43,076 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:26:43,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:43,077 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:26:43,078 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:26:43,078 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
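The JUnit call stack above bottoms out in AbstractTestLogRolling.tearDown() invoking HBaseTestingUtil.shutdownMiniCluster(). A minimal sketch of that tear-down shape, assuming the mini cluster was started elsewhere in the test class (the TEST_UTIL field name and no-arg constructor here are illustrative, not taken from the log):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTearDownSketch {
  // Assumption: the mini cluster is started in a @Before/@BeforeClass method.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Mirrors the stack above: shutdownMiniCluster() closes the shared async
    // connection, stops master and region servers, then the mini DFS cluster.
    TEST_UTIL.shutdownMiniCluster();
  }
}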
2024-12-09T11:26:43,078 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:26:43,078 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:26:43,078 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '2dff3a36d44f,38937,1733743523844' ***** 2024-12-09T11:26:43,078 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:26:43,078 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:26:43,079 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:26:43,079 INFO [RS:0;2dff3a36d44f:38937 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:26:43,079 INFO [RS:0;2dff3a36d44f:38937 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:26:43,079 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(3091): Received CLOSE for 51c71c438277672a7babb8bcc8349f6a 2024-12-09T11:26:43,079 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(3091): Received CLOSE for a6e33672d96dcec117787729bb2f311f 2024-12-09T11:26:43,079 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(959): stopping server 2dff3a36d44f,38937,1733743523844 2024-12-09T11:26:43,079 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:26:43,079 INFO [RS:0;2dff3a36d44f:38937 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2dff3a36d44f:38937. 2024-12-09T11:26:43,079 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 51c71c438277672a7babb8bcc8349f6a, disabling compactions & flushes 2024-12-09T11:26:43,079 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 
2024-12-09T11:26:43,079 DEBUG [RS:0;2dff3a36d44f:38937 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:26:43,079 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 2024-12-09T11:26:43,079 DEBUG [RS:0;2dff3a36d44f:38937 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:26:43,079 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. after waiting 0 ms 2024-12-09T11:26:43,079 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 2024-12-09T11:26:43,079 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:26:43,079 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:26:43,079 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
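The "Time limited wait for close lock ... Acquired close lock ... after waiting 0 ms ... Updates disabled" sequence above reflects an exclusive lock taken with a timeout before the region stops accepting writes. A conceptual sketch of that pattern in plain java.util.concurrent terms, not HRegion's actual close path:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CloseLockSketch {
  private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();

  boolean closeWithTimeout(long timeoutMs) throws InterruptedException {
    long start = System.currentTimeMillis();
    if (!closeLock.writeLock().tryLock(timeoutMs, TimeUnit.MILLISECONDS)) {
      return false; // could not get exclusive access within the time limit
    }
    try {
      long waited = System.currentTimeMillis() - start;
      // corresponds to "Acquired close lock on <region> after waiting N ms"
      System.out.println("Acquired close lock after waiting " + waited + " ms");
      // ... disable updates, flush remaining data, write the close marker ...
      return true;
    } finally {
      closeLock.writeLock().unlock();
    }
  }
}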
2024-12-09T11:26:43,079 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T11:26:43,080 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-09T11:26:43,080 DEBUG [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(1325): Online Regions={51c71c438277672a7babb8bcc8349f6a=TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a., 1588230740=hbase:meta,,1.1588230740, a6e33672d96dcec117787729bb2f311f=TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f.} 2024-12-09T11:26:43,080 DEBUG [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 51c71c438277672a7babb8bcc8349f6a, a6e33672d96dcec117787729bb2f311f 2024-12-09T11:26:43,080 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:26:43,080 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:26:43,080 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:26:43,080 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:26:43,080 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:26:43,080 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b->hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/05cf5728c2124d9ebf8261c2df7d9789-top, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-e3a546bd3831452080a7e9062b233da8, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-53706db6c1984772b1cba29c018879ef, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/2eb1b52704aa4fc79333d626550cf825, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-eb6908f0809547dabbc5d787ea1333f9, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ffc5b753033a4f1ab6fc6c31c01ad677, 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/a5f299f2e049469ababa18227c165dcd, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/a857b7067ff14a5d9393a7b270791c88, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/5478a41684664ac7b2ba624d3f51df0e, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ff4a4d16b50940628eb2323349260646, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/54c285ee652740349cbcda7da02a4ba7, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ecc19f0561e04a5882c08b1a66d17fb1, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/91a9972bd2e04dfc8c1316c5b4db8013, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/61b10045e79a4f6199a89635830b4936, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/0e5e69c913e442e494401ebc17a7ef2d, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/53d92555334b43398fb50ac1f41043a6, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/e3912a3587d54196b4838e4042b62346, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/b643dc0293b349bab02e724bce5e8c84, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/bf0c2fba05464e8b9a2aa5999ae8bf3f] to archive 2024-12-09T11:26:43,081 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
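The listing above and the "Archived from FileableStoreFile" lines that follow move each compacted store file from data/default/... to the same relative path under archive/ in the cluster root. A small sketch of that path re-rooting, using a hypothetical helper rather than HBase's HFileArchiver itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  // Hypothetical helper: re-roots a store file path under <rootDir>/archive,
  // matching the data/... -> archive/data/... mapping visible in the log.
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1); // "data/default/..."
    return new Path(new Path(rootDir, "archive"), relative);
  }

  static void archive(Configuration conf, Path rootDir, Path storeFile) throws Exception {
    FileSystem fs = storeFile.getFileSystem(conf);
    Path target = toArchivePath(rootDir, storeFile);
    fs.mkdirs(target.getParent());
    // The real HFileArchiver also handles name collisions and retries; this
    // sketch only illustrates the move the DEBUG lines record.
    if (!fs.rename(storeFile, target)) {
      throw new java.io.IOException("rename failed for " + storeFile);
    }
  }
}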
2024-12-09T11:26:43,083 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:26:43,084 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-e3a546bd3831452080a7e9062b233da8 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-e3a546bd3831452080a7e9062b233da8 2024-12-09T11:26:43,086 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-53706db6c1984772b1cba29c018879ef to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-53706db6c1984772b1cba29c018879ef 2024-12-09T11:26:43,087 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/2eb1b52704aa4fc79333d626550cf825 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/2eb1b52704aa4fc79333d626550cf825 2024-12-09T11:26:43,089 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-eb6908f0809547dabbc5d787ea1333f9 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/TestLogRolling-testLogRolling=7f8de01bf71778d802e7a54d45d7719b-eb6908f0809547dabbc5d787ea1333f9 2024-12-09T11:26:43,090 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] 
wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-09T11:26:43,090 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ffc5b753033a4f1ab6fc6c31c01ad677 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ffc5b753033a4f1ab6fc6c31c01ad677 2024-12-09T11:26:43,090 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:26:43,090 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:26:43,090 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743603080Running coprocessor pre-close hooks at 1733743603080Disabling compacts and flushes for region at 1733743603080Disabling writes for close at 1733743603080Writing region close event to WAL at 1733743603086 (+6 ms)Running coprocessor post-close hooks at 1733743603090 (+4 ms)Closed at 1733743603090 2024-12-09T11:26:43,090 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T11:26:43,091 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/a5f299f2e049469ababa18227c165dcd to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/a5f299f2e049469ababa18227c165dcd 2024-12-09T11:26:43,092 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/a857b7067ff14a5d9393a7b270791c88 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/a857b7067ff14a5d9393a7b270791c88 2024-12-09T11:26:43,093 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/5478a41684664ac7b2ba624d3f51df0e to 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/5478a41684664ac7b2ba624d3f51df0e 2024-12-09T11:26:43,095 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ff4a4d16b50940628eb2323349260646 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ff4a4d16b50940628eb2323349260646 2024-12-09T11:26:43,096 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/54c285ee652740349cbcda7da02a4ba7 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/54c285ee652740349cbcda7da02a4ba7 2024-12-09T11:26:43,097 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ecc19f0561e04a5882c08b1a66d17fb1 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/ecc19f0561e04a5882c08b1a66d17fb1 2024-12-09T11:26:43,098 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/91a9972bd2e04dfc8c1316c5b4db8013 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/91a9972bd2e04dfc8c1316c5b4db8013 2024-12-09T11:26:43,099 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/61b10045e79a4f6199a89635830b4936 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/61b10045e79a4f6199a89635830b4936 2024-12-09T11:26:43,099 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/0e5e69c913e442e494401ebc17a7ef2d to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/0e5e69c913e442e494401ebc17a7ef2d 2024-12-09T11:26:43,100 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/53d92555334b43398fb50ac1f41043a6 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/53d92555334b43398fb50ac1f41043a6 2024-12-09T11:26:43,101 INFO [regionserver/2dff3a36d44f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T11:26:43,101 INFO [regionserver/2dff3a36d44f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T11:26:43,102 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/e3912a3587d54196b4838e4042b62346 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/e3912a3587d54196b4838e4042b62346 2024-12-09T11:26:43,103 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/b643dc0293b349bab02e724bce5e8c84 to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/b643dc0293b349bab02e724bce5e8c84 2024-12-09T11:26:43,104 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/bf0c2fba05464e8b9a2aa5999ae8bf3f to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/info/bf0c2fba05464e8b9a2aa5999ae8bf3f 2024-12-09T11:26:43,104 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=2dff3a36d44f:43325 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-09T11:26:43,104 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [2eb1b52704aa4fc79333d626550cf825=43081, ffc5b753033a4f1ab6fc6c31c01ad677=12516, a5f299f2e049469ababa18227c165dcd=75664, a857b7067ff14a5d9393a7b270791c88=29784, 5478a41684664ac7b2ba624d3f51df0e=12516, ff4a4d16b50940628eb2323349260646=96252, 54c285ee652740349cbcda7da02a4ba7=17906, ecc19f0561e04a5882c08b1a66d17fb1=22238, 91a9972bd2e04dfc8c1316c5b4db8013=128830, 61b10045e79a4f6199a89635830b4936=20078, 0e5e69c913e442e494401ebc17a7ef2d=22238, 53d92555334b43398fb50ac1f41043a6=153799, e3912a3587d54196b4838e4042b62346=12516, b643dc0293b349bab02e724bce5e8c84=29807, bf0c2fba05464e8b9a2aa5999ae8bf3f=12523] 2024-12-09T11:26:43,108 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/51c71c438277672a7babb8bcc8349f6a/recovered.edits/331.seqid, newMaxSeqId=331, maxSeqId=126 2024-12-09T11:26:43,108 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 2024-12-09T11:26:43,108 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 51c71c438277672a7babb8bcc8349f6a: Waiting for close lock at 1733743603079Running coprocessor pre-close hooks at 1733743603079Disabling compacts and flushes for region at 1733743603079Disabling writes for close at 1733743603079Writing region close event to WAL at 1733743603105 (+26 ms)Running coprocessor post-close hooks at 1733743603108 (+3 ms)Closed at 1733743603108 2024-12-09T11:26:43,108 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733743549105.51c71c438277672a7babb8bcc8349f6a. 2024-12-09T11:26:43,109 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a6e33672d96dcec117787729bb2f311f, disabling compactions & flushes 2024-12-09T11:26:43,109 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f. 2024-12-09T11:26:43,109 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f. 2024-12-09T11:26:43,109 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f. 
after waiting 0 ms 2024-12-09T11:26:43,109 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f. 2024-12-09T11:26:43,109 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f/info/05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b->hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/7f8de01bf71778d802e7a54d45d7719b/info/05cf5728c2124d9ebf8261c2df7d9789-bottom] to archive 2024-12-09T11:26:43,110 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T11:26:43,111 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f/info/05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b to hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/archive/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f/info/05cf5728c2124d9ebf8261c2df7d9789.7f8de01bf71778d802e7a54d45d7719b 2024-12-09T11:26:43,111 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-09T11:26:43,114 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/data/default/TestLogRolling-testLogRolling/a6e33672d96dcec117787729bb2f311f/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-12-09T11:26:43,115 INFO [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f. 2024-12-09T11:26:43,115 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a6e33672d96dcec117787729bb2f311f: Waiting for close lock at 1733743603109Running coprocessor pre-close hooks at 1733743603109Disabling compacts and flushes for region at 1733743603109Disabling writes for close at 1733743603109Writing region close event to WAL at 1733743603111 (+2 ms)Running coprocessor post-close hooks at 1733743603115 (+4 ms)Closed at 1733743603115 2024-12-09T11:26:43,115 DEBUG [RS_CLOSE_REGION-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733743549105.a6e33672d96dcec117787729bb2f311f. 2024-12-09T11:26:43,280 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(976): stopping server 2dff3a36d44f,38937,1733743523844; all regions closed. 
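The WALSplitUtil lines above record markers such as recovered.edits/331.seqid and recovered.edits/131.seqid, where the file name itself carries the new max sequence id for the closed region. A tiny sketch of reading that naming convention back (an illustrative parser, not an HBase API):

import org.apache.hadoop.fs.Path;

public class SeqIdMarkerSketch {
  // e.g. ".../recovered.edits/331.seqid" -> 331
  static long parseMaxSeqId(Path seqIdFile) {
    String name = seqIdFile.getName();                 // "331.seqid"
    return Long.parseLong(name.substring(0, name.indexOf('.')));
  }
}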
2024-12-09T11:26:43,281 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,281 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,281 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,281 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,281 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741834_1010 (size=8107) 2024-12-09T11:26:43,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741834_1010 (size=8107) 2024-12-09T11:26:43,286 DEBUG [RS:0;2dff3a36d44f:38937 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/oldWALs 2024-12-09T11:26:43,286 INFO [RS:0;2dff3a36d44f:38937 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C38937%2C1733743523844.meta:.meta(num 1733743524626) 2024-12-09T11:26:43,287 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,287 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,287 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,287 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,287 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741875_1051 (size=780) 2024-12-09T11:26:43,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741875_1051 (size=780) 2024-12-09T11:26:43,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:43,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:26:43,692 DEBUG [RS:0;2dff3a36d44f:38937 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/oldWALs 2024-12-09T11:26:43,692 INFO [RS:0;2dff3a36d44f:38937 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C38937%2C1733743523844:(num 1733743602644) 2024-12-09T11:26:43,692 DEBUG [RS:0;2dff3a36d44f:38937 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:26:43,693 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:26:43,693 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:26:43,693 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.ChoreService(370): Chore service for: regionserver/2dff3a36d44f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T11:26:43,693 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:26:43,693 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T11:26:43,693 INFO [RS:0;2dff3a36d44f:38937 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38937 2024-12-09T11:26:43,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:26:43,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2dff3a36d44f,38937,1733743523844 2024-12-09T11:26:43,696 INFO [RS:0;2dff3a36d44f:38937 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:26:43,696 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2dff3a36d44f,38937,1733743523844] 2024-12-09T11:26:43,699 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2dff3a36d44f,38937,1733743523844 already deleted, retry=false 2024-12-09T11:26:43,699 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2dff3a36d44f,38937,1733743523844 expired; onlineServers=0 2024-12-09T11:26:43,699 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2dff3a36d44f,43325,1733743523795' ***** 2024-12-09T11:26:43,699 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T11:26:43,700 INFO [M:0;2dff3a36d44f:43325 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:26:43,700 INFO [M:0;2dff3a36d44f:43325 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:26:43,700 DEBUG [M:0;2dff3a36d44f:43325 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T11:26:43,700 DEBUG [M:0;2dff3a36d44f:43325 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T11:26:43,700 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743524027 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743524027,5,FailOnTimeoutGroup] 
2024-12-09T11:26:43,700 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743524027 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743524027,5,FailOnTimeoutGroup] 2024-12-09T11:26:43,700 INFO [M:0;2dff3a36d44f:43325 {}] hbase.ChoreService(370): Chore service for: master/2dff3a36d44f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T11:26:43,700 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-09T11:26:43,700 INFO [M:0;2dff3a36d44f:43325 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:26:43,700 DEBUG [M:0;2dff3a36d44f:43325 {}] master.HMaster(1795): Stopping service threads 2024-12-09T11:26:43,700 INFO [M:0;2dff3a36d44f:43325 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T11:26:43,701 INFO [M:0;2dff3a36d44f:43325 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:26:43,701 INFO [M:0;2dff3a36d44f:43325 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T11:26:43,701 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T11:26:43,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T11:26:43,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:43,703 DEBUG [M:0;2dff3a36d44f:43325 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-12-09T11:26:43,703 DEBUG [M:0;2dff3a36d44f:43325 {}] master.ActiveMasterManager(353): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-12-09T11:26:43,704 INFO [M:0;2dff3a36d44f:43325 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/.lastflushedseqids 2024-12-09T11:26:43,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741876_1052 (size=228) 2024-12-09T11:26:43,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741876_1052 (size=228) 2024-12-09T11:26:43,717 INFO [M:0;2dff3a36d44f:43325 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T11:26:43,717 INFO [M:0;2dff3a36d44f:43325 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T11:26:43,717 DEBUG [M:0;2dff3a36d44f:43325 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:26:43,717 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T11:26:43,717 DEBUG [M:0;2dff3a36d44f:43325 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:26:43,717 DEBUG [M:0;2dff3a36d44f:43325 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:26:43,717 DEBUG [M:0;2dff3a36d44f:43325 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:26:43,717 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.70 KB heapSize=65.92 KB 2024-12-09T11:26:43,734 DEBUG [M:0;2dff3a36d44f:43325 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4755b5acab3f4ef9b20fafd6986b1855 is 82, key is hbase:meta,,1/info:regioninfo/1733743524651/Put/seqid=0 2024-12-09T11:26:43,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741877_1053 (size=5672) 2024-12-09T11:26:43,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741877_1053 (size=5672) 2024-12-09T11:26:43,745 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4755b5acab3f4ef9b20fafd6986b1855 2024-12-09T11:26:43,768 DEBUG [M:0;2dff3a36d44f:43325 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ee4e453919b847bcb26fdcfa76e087cc is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733743525159/Put/seqid=0 2024-12-09T11:26:43,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741878_1054 (size=7680) 2024-12-09T11:26:43,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741878_1054 (size=7680) 2024-12-09T11:26:43,780 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.09 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ee4e453919b847bcb26fdcfa76e087cc 2024-12-09T11:26:43,784 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ee4e453919b847bcb26fdcfa76e087cc 2024-12-09T11:26:43,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:26:43,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38937-0x1012aee47df0001, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:26:43,797 INFO 
[RS:0;2dff3a36d44f:38937 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:26:43,798 INFO [RS:0;2dff3a36d44f:38937 {}] regionserver.HRegionServer(1031): Exiting; stopping=2dff3a36d44f,38937,1733743523844; zookeeper connection closed. 2024-12-09T11:26:43,798 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7671b623 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7671b623 2024-12-09T11:26:43,798 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T11:26:43,805 DEBUG [M:0;2dff3a36d44f:43325 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8ca28d7db41e4c5bbaecfb363215ab4c is 69, key is 2dff3a36d44f,38937,1733743523844/rs:state/1733743524089/Put/seqid=0 2024-12-09T11:26:43,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741879_1055 (size=5156) 2024-12-09T11:26:43,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741879_1055 (size=5156) 2024-12-09T11:26:43,811 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8ca28d7db41e4c5bbaecfb363215ab4c 2024-12-09T11:26:43,835 DEBUG [M:0;2dff3a36d44f:43325 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/25d1ae5a0e4943e3b90997e194899e36 is 52, key is load_balancer_on/state:d/1733743524768/Put/seqid=0 2024-12-09T11:26:43,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741880_1056 (size=5056) 2024-12-09T11:26:43,847 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/25d1ae5a0e4943e3b90997e194899e36 2024-12-09T11:26:43,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741880_1056 (size=5056) 2024-12-09T11:26:43,855 DEBUG [M:0;2dff3a36d44f:43325 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4755b5acab3f4ef9b20fafd6986b1855 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4755b5acab3f4ef9b20fafd6986b1855 2024-12-09T11:26:43,861 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4755b5acab3f4ef9b20fafd6986b1855, 
entries=8, sequenceid=129, filesize=5.5 K 2024-12-09T11:26:43,862 DEBUG [M:0;2dff3a36d44f:43325 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ee4e453919b847bcb26fdcfa76e087cc as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ee4e453919b847bcb26fdcfa76e087cc 2024-12-09T11:26:43,867 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ee4e453919b847bcb26fdcfa76e087cc 2024-12-09T11:26:43,868 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ee4e453919b847bcb26fdcfa76e087cc, entries=14, sequenceid=129, filesize=7.5 K 2024-12-09T11:26:43,869 DEBUG [M:0;2dff3a36d44f:43325 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8ca28d7db41e4c5bbaecfb363215ab4c as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8ca28d7db41e4c5bbaecfb363215ab4c 2024-12-09T11:26:43,873 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8ca28d7db41e4c5bbaecfb363215ab4c, entries=1, sequenceid=129, filesize=5.0 K 2024-12-09T11:26:43,874 DEBUG [M:0;2dff3a36d44f:43325 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/25d1ae5a0e4943e3b90997e194899e36 as hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/25d1ae5a0e4943e3b90997e194899e36 2024-12-09T11:26:43,881 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41739/user/jenkins/test-data/4f66edc6-6194-78f5-23ca-0d17ba926658/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/25d1ae5a0e4943e3b90997e194899e36, entries=1, sequenceid=129, filesize=4.9 K 2024-12-09T11:26:43,883 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.70 KB/54985, heapSize ~65.86 KB/67440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 166ms, sequenceid=129, compaction requested=false 2024-12-09T11:26:43,902 INFO [M:0;2dff3a36d44f:43325 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T11:26:43,902 DEBUG [M:0;2dff3a36d44f:43325 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743603717Disabling compacts and flushes for region at 1733743603717Disabling writes for close at 1733743603717Obtaining lock to block concurrent updates at 1733743603717Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733743603717Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54985, getHeapSize=67440, getOffHeapSize=0, getCellsCount=152 at 1733743603717Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733743603718 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733743603719 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733743603734 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733743603734Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733743603749 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733743603768 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733743603768Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733743603784 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733743603805 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733743603805Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733743603816 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733743603834 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733743603834Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6042d96b: reopening flushed file at 1733743603852 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72d62ea5: reopening flushed file at 1733743603861 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32d0d8ed: reopening flushed file at 1733743603868 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17736a62: reopening flushed file at 1733743603874 (+6 ms)Finished flush of dataSize ~53.70 KB/54985, heapSize ~65.86 KB/67440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 166ms, sequenceid=129, compaction requested=false at 1733743603883 (+9 ms)Writing region close event to WAL at 1733743603902 (+19 ms)Closed at 1733743603902 2024-12-09T11:26:43,904 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,904 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,905 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,905 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,906 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:43,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32797 is added to blk_1073741830_1006 (size=63915) 2024-12-09T11:26:43,915 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T11:26:43,915 INFO [M:0;2dff3a36d44f:43325 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T11:26:43,915 INFO [M:0;2dff3a36d44f:43325 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43325 2024-12-09T11:26:43,915 INFO [M:0;2dff3a36d44f:43325 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:26:43,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39993 is added to blk_1073741830_1006 (size=63915) 2024-12-09T11:26:44,020 INFO [M:0;2dff3a36d44f:43325 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:26:44,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:26:44,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43325-0x1012aee47df0000, quorum=127.0.0.1:52296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:26:44,023 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@141cb2d0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:26:44,024 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2671310d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:26:44,024 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:26:44,024 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a3c5032{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:26:44,024 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c9b8b8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/hadoop.log.dir/,STOPPED} 2024-12-09T11:26:44,025 WARN [BP-1341352516-172.17.0.3-1733743522950 heartbeating to localhost/127.0.0.1:41739 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:26:44,025 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:26:44,025 WARN [BP-1341352516-172.17.0.3-1733743522950 heartbeating to localhost/127.0.0.1:41739 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1341352516-172.17.0.3-1733743522950 (Datanode Uuid 60082d73-9ac9-40ff-af90-86f55e63cb53) service to localhost/127.0.0.1:41739 2024-12-09T11:26:44,025 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:26:44,026 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/cluster_47adba77-69e8-4af2-0418-f524cdf7a90b/data/data3/current/BP-1341352516-172.17.0.3-1733743522950 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:26:44,026 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/cluster_47adba77-69e8-4af2-0418-f524cdf7a90b/data/data4/current/BP-1341352516-172.17.0.3-1733743522950 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:26:44,026 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:26:44,029 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@676fa0b1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:26:44,029 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1f76160c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:26:44,029 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:26:44,029 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f4fe47f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:26:44,029 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3339c3bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/hadoop.log.dir/,STOPPED} 2024-12-09T11:26:44,030 WARN [BP-1341352516-172.17.0.3-1733743522950 heartbeating to localhost/127.0.0.1:41739 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T11:26:44,030 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T11:26:44,030 WARN [BP-1341352516-172.17.0.3-1733743522950 heartbeating to localhost/127.0.0.1:41739 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1341352516-172.17.0.3-1733743522950 (Datanode Uuid 39ef2d8f-28f5-4523-a753-d072ae499f26) service to localhost/127.0.0.1:41739 2024-12-09T11:26:44,030 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T11:26:44,031 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/cluster_47adba77-69e8-4af2-0418-f524cdf7a90b/data/data1/current/BP-1341352516-172.17.0.3-1733743522950 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:26:44,031 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/cluster_47adba77-69e8-4af2-0418-f524cdf7a90b/data/data2/current/BP-1341352516-172.17.0.3-1733743522950 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T11:26:44,031 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T11:26:44,038 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f5b704d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:26:44,039 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d2a8d11{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T11:26:44,039 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T11:26:44,039 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa998c3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T11:26:44,039 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2fa21789{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/hadoop.log.dir/,STOPPED} 2024-12-09T11:26:44,049 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T11:26:44,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T11:26:44,102 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=227 (was 206) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41739 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41739 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:41739 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41739 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41739 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:41739 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially 
hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:41739 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:41739 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41739 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=503 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=379 (was 331) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=791 (was 750) - AvailableMemoryMB LEAK? 
- 2024-12-09T11:26:44,105 INFO [regionserver/2dff3a36d44f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:26:44,111 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=227, OpenFileDescriptor=503, MaxFileDescriptor=1048576, SystemLoadAverage=379, ProcessCount=11, AvailableMemoryMB=791 2024-12-09T11:26:44,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T11:26:44,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/hadoop.log.dir so I do NOT create it in target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964 2024-12-09T11:26:44,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/903b6576-04d0-24eb-e75e-220b7721b08b/hadoop.tmp.dir so I do NOT create it in target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964 2024-12-09T11:26:44,112 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/cluster_ce936459-8ecc-420a-9516-854003af5727, deleteOnExit=true 2024-12-09T11:26:44,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T11:26:44,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/test.cache.data in system properties and HBase conf 2024-12-09T11:26:44,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T11:26:44,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/hadoop.log.dir in system properties and HBase conf 2024-12-09T11:26:44,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T11:26:44,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T11:26:44,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T11:26:44,112 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/nfs.dump.dir in system properties and HBase conf 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/java.io.tmpdir in system properties and HBase conf 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T11:26:44,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T11:26:44,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T11:26:44,130 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:26:44,201 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:26:44,205 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:26:44,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:26:44,211 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:26:44,211 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T11:26:44,212 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:26:44,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bb2f36a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:26:44,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45e45587{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:26:44,330 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@285ac396{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/java.io.tmpdir/jetty-localhost-45335-hadoop-hdfs-3_4_1-tests_jar-_-any-1192262696556193007/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T11:26:44,330 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54fed95e{HTTP/1.1, (http/1.1)}{localhost:45335} 2024-12-09T11:26:44,330 INFO [Time-limited test {}] server.Server(415): Started @325603ms 2024-12-09T11:26:44,343 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T11:26:44,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:44,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:44,399 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:26:44,402 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:26:44,402 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:26:44,402 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:26:44,402 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:26:44,403 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@729fe98a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:26:44,403 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7095e0d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:26:44,516 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59a2b0cf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/java.io.tmpdir/jetty-localhost-42111-hadoop-hdfs-3_4_1-tests_jar-_-any-16745673355753835897/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:26:44,516 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3893ee93{HTTP/1.1, (http/1.1)}{localhost:42111} 2024-12-09T11:26:44,516 INFO [Time-limited test {}] server.Server(415): Started @325789ms 2024-12-09T11:26:44,517 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T11:26:44,561 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T11:26:44,563 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T11:26:44,564 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T11:26:44,564 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T11:26:44,564 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T11:26:44,565 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a4842da{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/hadoop.log.dir/,AVAILABLE} 2024-12-09T11:26:44,565 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@244b5b56{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T11:26:44,619 WARN [Thread-2468 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/cluster_ce936459-8ecc-420a-9516-854003af5727/data/data1/current/BP-1748757753-172.17.0.3-1733743604136/current, will proceed with Du for space computation calculation, 2024-12-09T11:26:44,619 WARN [Thread-2469 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/cluster_ce936459-8ecc-420a-9516-854003af5727/data/data2/current/BP-1748757753-172.17.0.3-1733743604136/current, will proceed with Du for space computation calculation, 2024-12-09T11:26:44,640 WARN [Thread-2447 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T11:26:44,642 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9d10247c5493d833 with lease ID 0xb2beaf50751f33fb: Processing first storage report for DS-f94970c0-9ce0-40e8-b1c3-4b7677d0abf1 from datanode DatanodeRegistration(127.0.0.1:33275, datanodeUuid=79f93338-c0c4-4f22-acd6-14a13a4855aa, infoPort=42331, infoSecurePort=0, ipcPort=41511, storageInfo=lv=-57;cid=testClusterID;nsid=1770806260;c=1733743604136) 2024-12-09T11:26:44,642 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9d10247c5493d833 with lease ID 0xb2beaf50751f33fb: from storage DS-f94970c0-9ce0-40e8-b1c3-4b7677d0abf1 node DatanodeRegistration(127.0.0.1:33275, datanodeUuid=79f93338-c0c4-4f22-acd6-14a13a4855aa, infoPort=42331, infoSecurePort=0, ipcPort=41511, storageInfo=lv=-57;cid=testClusterID;nsid=1770806260;c=1733743604136), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:26:44,642 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9d10247c5493d833 with lease ID 0xb2beaf50751f33fb: Processing first storage report for DS-83e3caca-515e-4318-a0b7-725d9f405040 from datanode DatanodeRegistration(127.0.0.1:33275, datanodeUuid=79f93338-c0c4-4f22-acd6-14a13a4855aa, infoPort=42331, infoSecurePort=0, ipcPort=41511, storageInfo=lv=-57;cid=testClusterID;nsid=1770806260;c=1733743604136) 2024-12-09T11:26:44,642 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9d10247c5493d833 with lease ID 0xb2beaf50751f33fb: from storage DS-83e3caca-515e-4318-a0b7-725d9f405040 node DatanodeRegistration(127.0.0.1:33275, datanodeUuid=79f93338-c0c4-4f22-acd6-14a13a4855aa, infoPort=42331, infoSecurePort=0, ipcPort=41511, storageInfo=lv=-57;cid=testClusterID;nsid=1770806260;c=1733743604136), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T11:26:44,697 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d371f78{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/java.io.tmpdir/jetty-localhost-40095-hadoop-hdfs-3_4_1-tests_jar-_-any-7649829038981280232/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T11:26:44,697 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34c62d4f{HTTP/1.1, (http/1.1)}{localhost:40095} 2024-12-09T11:26:44,697 INFO [Time-limited test {}] server.Server(415): Started @325970ms 2024-12-09T11:26:44,699 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
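The recurring RecoverLeaseFSUtils warnings earlier in this section (InvocationTargetException caused by "Filesystem closed") come from the WAL close path polling isFileClosed through reflection against a DFS client that has already been shut down. A simplified sketch of that polling pattern using the public DistributedFileSystem API follows; the direct calls stand in for the reflective ones in RecoverLeaseFSUtils, and the path and sleep interval are illustrative, not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative WAL path; in the log the URI points at the mini DFS NameNode.
    Path wal = new Path("hdfs://localhost:42671/user/jenkins/test-data/example.wal");
    DistributedFileSystem dfs = (DistributedFileSystem) wal.getFileSystem(conf);
    // Ask the NameNode to start lease recovery; true means the file is already closed.
    boolean closed = dfs.recoverLease(wal);
    // Poll until the lease is recovered, roughly what RecoverLeaseFSUtils does with backoff.
    while (!closed) {
      Thread.sleep(1000L);
      // Throws IOException("Filesystem closed") if the DFS client was already shut down,
      // which is exactly the cause chain shown in the stack traces above.
      closed = dfs.isFileClosed(wal);
    }
  }
}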
2024-12-09T11:26:44,815 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/cluster_ce936459-8ecc-420a-9516-854003af5727/data/data3/current/BP-1748757753-172.17.0.3-1733743604136/current, will proceed with Du for space computation calculation, 2024-12-09T11:26:44,823 WARN [Thread-2495 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/cluster_ce936459-8ecc-420a-9516-854003af5727/data/data4/current/BP-1748757753-172.17.0.3-1733743604136/current, will proceed with Du for space computation calculation, 2024-12-09T11:26:44,852 WARN [Thread-2483 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T11:26:44,854 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x97fc2a0c95f96171 with lease ID 0xb2beaf50751f33fc: Processing first storage report for DS-eadfdcd8-e444-4c63-bb70-8383d897a8f0 from datanode DatanodeRegistration(127.0.0.1:34091, datanodeUuid=6a0b215d-bc80-4a03-b7d7-12ba07a91ae3, infoPort=43157, infoSecurePort=0, ipcPort=42957, storageInfo=lv=-57;cid=testClusterID;nsid=1770806260;c=1733743604136) 2024-12-09T11:26:44,854 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x97fc2a0c95f96171 with lease ID 0xb2beaf50751f33fc: from storage DS-eadfdcd8-e444-4c63-bb70-8383d897a8f0 node DatanodeRegistration(127.0.0.1:34091, datanodeUuid=6a0b215d-bc80-4a03-b7d7-12ba07a91ae3, infoPort=43157, infoSecurePort=0, ipcPort=42957, storageInfo=lv=-57;cid=testClusterID;nsid=1770806260;c=1733743604136), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:26:44,854 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x97fc2a0c95f96171 with lease ID 0xb2beaf50751f33fc: Processing first storage report for DS-52c49eab-992e-4ad4-a88d-1b8f0de52d0a from datanode DatanodeRegistration(127.0.0.1:34091, datanodeUuid=6a0b215d-bc80-4a03-b7d7-12ba07a91ae3, infoPort=43157, infoSecurePort=0, ipcPort=42957, storageInfo=lv=-57;cid=testClusterID;nsid=1770806260;c=1733743604136) 2024-12-09T11:26:44,854 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x97fc2a0c95f96171 with lease ID 0xb2beaf50751f33fc: from storage DS-52c49eab-992e-4ad4-a88d-1b8f0de52d0a node DatanodeRegistration(127.0.0.1:34091, datanodeUuid=6a0b215d-bc80-4a03-b7d7-12ba07a91ae3, infoPort=43157, infoSecurePort=0, ipcPort=42957, storageInfo=lv=-57;cid=testClusterID;nsid=1770806260;c=1733743604136), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T11:26:44,928 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964 2024-12-09T11:26:44,931 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/cluster_ce936459-8ecc-420a-9516-854003af5727/zookeeper_0, clientPort=57296, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/cluster_ce936459-8ecc-420a-9516-854003af5727/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/cluster_ce936459-8ecc-420a-9516-854003af5727/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T11:26:44,932 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57296 2024-12-09T11:26:44,932 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:26:44,933 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:26:44,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:26:44,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741825_1001 (size=7) 2024-12-09T11:26:44,946 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2 with version=8 2024-12-09T11:26:44,946 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38079/user/jenkins/test-data/681711be-0e5c-4430-f91f-24d98e970561/hbase-staging 2024-12-09T11:26:44,948 INFO [Time-limited test {}] client.ConnectionUtils(128): master/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:26:44,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:26:44,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:26:44,949 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:26:44,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:26:44,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:26:44,949 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T11:26:44,949 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:26:44,950 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36503 2024-12-09T11:26:44,950 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36503 connecting to ZooKeeper ensemble=127.0.0.1:57296 2024-12-09T11:26:44,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:365030x0, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:26:44,959 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36503-0x1012aef84e20000 connected 2024-12-09T11:26:44,982 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:26:44,984 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:26:44,986 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:26:44,987 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2, hbase.cluster.distributed=false 2024-12-09T11:26:44,988 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:26:44,988 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36503 2024-12-09T11:26:44,988 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36503 2024-12-09T11:26:44,990 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36503 2024-12-09T11:26:44,994 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36503 2024-12-09T11:26:44,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36503 2024-12-09T11:26:45,014 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/2dff3a36d44f:0 server-side Connection retries=45 2024-12-09T11:26:45,014 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:26:45,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T11:26:45,015 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T11:26:45,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T11:26:45,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T11:26:45,015 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T11:26:45,015 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T11:26:45,016 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43063 2024-12-09T11:26:45,017 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43063 connecting to ZooKeeper ensemble=127.0.0.1:57296 2024-12-09T11:26:45,017 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:26:45,019 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:26:45,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:430630x0, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T11:26:45,025 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43063-0x1012aef84e20001 connected 2024-12-09T11:26:45,025 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:26:45,026 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T11:26:45,026 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T11:26:45,027 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T11:26:45,028 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T11:26:45,028 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43063 2024-12-09T11:26:45,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43063 2024-12-09T11:26:45,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43063 2024-12-09T11:26:45,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43063 2024-12-09T11:26:45,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43063 
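The two runs of RPC-executor and ZooKeeper messages above come from the master (port 36503) and the region server (port 43063) being brought up inside the same JVM by the test harness. A minimal sketch of the test-side code that drives this whole startup sequence, assuming the HBase 3.x test API (HBaseTestingUtil); the table name, family and row below are illustrative and only show that the cluster is usable once started.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Spins up mini DFS, mini ZooKeeper and a single master + region server,
    // producing startup logs like the ones above.
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();
    try {
      // Hypothetical table/row, not from this log.
      Table table = util.createTable(TableName.valueOf("t1"), Bytes.toBytes("cf"));
      table.put(new Put(Bytes.toBytes("r1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    } finally {
      // Tears the whole mini cluster down again.
      util.shutdownMiniCluster();
    }
  }
}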
2024-12-09T11:26:45,045 DEBUG [M:0;2dff3a36d44f:36503 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2dff3a36d44f:36503 2024-12-09T11:26:45,046 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/2dff3a36d44f,36503,1733743604948 2024-12-09T11:26:45,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:26:45,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:26:45,048 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2dff3a36d44f,36503,1733743604948 2024-12-09T11:26:45,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T11:26:45,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:45,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:45,051 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T11:26:45,052 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2dff3a36d44f,36503,1733743604948 from backup master directory 2024-12-09T11:26:45,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2dff3a36d44f,36503,1733743604948 2024-12-09T11:26:45,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:26:45,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T11:26:45,054 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
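The watcher events above trace the active-master handshake: the master first registers under /hbase/backup-masters, then claims /hbase/master and deletes its backup entry. A simplified illustration of that ephemeral-znode pattern using the plain ZooKeeper client follows; this shows the general pattern only, not HBase's internal ZKUtil/ActiveMasterManager code, and it assumes the parent znodes already exist.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class MasterZnodeSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the mini ZooKeeper ensemble (client port taken from the log above).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57296", 30000, event -> { });
    byte[] id = "2dff3a36d44f,36503,1733743604948".getBytes("UTF-8");
    // Register as a backup master first: ephemeral, so it vanishes if the session dies.
    zk.create("/hbase/backup-masters/2dff3a36d44f,36503,1733743604948", id,
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    // Try to claim the active-master znode; in a real race only one contender succeeds
    // (the others get a NodeExists error and stay in backup-masters).
    zk.create("/hbase/master", id, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    // Once active, drop the backup-masters entry, as seen in the log.
    zk.delete("/hbase/backup-masters/2dff3a36d44f,36503,1733743604948", -1);
    zk.close();
  }
}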
2024-12-09T11:26:45,054 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2dff3a36d44f,36503,1733743604948 2024-12-09T11:26:45,066 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/hbase.id] with ID: a37a32ae-b1b1-4a82-bd11-55fe93e09193 2024-12-09T11:26:45,066 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/.tmp/hbase.id 2024-12-09T11:26:45,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:26:45,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741826_1002 (size=42) 2024-12-09T11:26:45,079 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/.tmp/hbase.id]:[hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/hbase.id] 2024-12-09T11:26:45,092 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:26:45,092 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T11:26:45,093 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-12-09T11:26:45,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:45,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:45,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:26:45,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741827_1003 (size=196) 2024-12-09T11:26:45,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:45,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:45,518 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T11:26:45,519 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T11:26:45,519 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:26:45,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:26:45,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741828_1004 (size=1189) 2024-12-09T11:26:45,565 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', 
TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store 2024-12-09T11:26:45,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:26:45,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741829_1005 (size=34) 2024-12-09T11:26:45,586 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:26:45,586 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:26:45,586 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:26:45,586 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:26:45,586 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:26:45,586 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:26:45,586 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
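The 'master:store' descriptor printed above (families info, proc, rs and state with their encodings, bloom filters and block sizes) is the kind of schema normally assembled through the descriptor builders. A minimal sketch of building an equivalent descriptor with the public HBase client API follows; the table name is hypothetical and only the 'info' family from the log is mirrored, since master:store itself is created internally by the master.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family from the master:store descriptor in the log:
    // 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();
    // Hypothetical table name, for illustration only.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .build();
    System.out.println(td);
  }
}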
2024-12-09T11:26:45,586 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743605586Disabling compacts and flushes for region at 1733743605586Disabling writes for close at 1733743605586Writing region close event to WAL at 1733743605586Closed at 1733743605586 2024-12-09T11:26:45,587 WARN [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/.initializing 2024-12-09T11:26:45,587 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/WALs/2dff3a36d44f,36503,1733743604948 2024-12-09T11:26:45,589 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C36503%2C1733743604948, suffix=, logDir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/WALs/2dff3a36d44f,36503,1733743604948, archiveDir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/oldWALs, maxLogs=10 2024-12-09T11:26:45,590 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C36503%2C1733743604948.1733743605590 2024-12-09T11:26:45,608 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/WALs/2dff3a36d44f,36503,1733743604948/2dff3a36d44f%2C36503%2C1733743604948.1733743605590 2024-12-09T11:26:45,626 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42331:42331),(127.0.0.1/127.0.0.1:43157:43157)] 2024-12-09T11:26:45,638 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:26:45,639 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:26:45,639 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:26:45,639 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:26:45,642 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:26:45,643 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T11:26:45,644 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:45,644 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:26:45,644 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:26:45,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T11:26:45,645 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:45,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:26:45,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:26:45,647 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T11:26:45,647 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:45,648 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:26:45,648 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:26:45,649 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T11:26:45,649 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:45,649 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T11:26:45,649 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:26:45,654 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:26:45,654 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:26:45,655 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:26:45,656 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:26:45,656 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T11:26:45,657 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T11:26:45,662 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:26:45,662 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743715, jitterRate=-0.054317593574523926}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T11:26:45,663 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733743605639Initializing all the Stores at 1733743605640 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743605640Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743605642 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743605642Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743605642Cleaning up temporary data from old regions at 1733743605656 (+14 ms)Region opened successfully at 1733743605663 (+7 ms) 2024-12-09T11:26:45,666 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T11:26:45,669 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c30d25a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:26:45,670 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T11:26:45,671 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T11:26:45,671 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T11:26:45,671 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T11:26:45,671 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T11:26:45,672 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T11:26:45,672 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T11:26:45,679 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T11:26:45,680 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T11:26:45,682 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T11:26:45,682 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T11:26:45,683 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T11:26:45,684 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T11:26:45,684 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T11:26:45,685 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T11:26:45,686 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T11:26:45,687 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T11:26:45,688 DEBUG 
[master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T11:26:45,690 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T11:26:45,693 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T11:26:45,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:26:45,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:45,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T11:26:45,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:45,699 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=2dff3a36d44f,36503,1733743604948, sessionid=0x1012aef84e20000, setting cluster-up flag (Was=false) 2024-12-09T11:26:45,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:45,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:45,707 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T11:26:45,708 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,36503,1733743604948 2024-12-09T11:26:45,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:45,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:45,717 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T11:26:45,723 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2dff3a36d44f,36503,1733743604948 2024-12-09T11:26:45,724 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T11:26:45,726 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T11:26:45,726 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T11:26:45,726 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T11:26:45,726 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2dff3a36d44f,36503,1733743604948 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T11:26:45,728 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:26:45,728 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:26:45,728 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:26:45,728 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T11:26:45,728 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2dff3a36d44f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T11:26:45,728 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:26:45,728 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:26:45,728 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2dff3a36d44f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T11:26:45,732 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:26:45,732 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T11:26:45,733 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:45,734 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T11:26:45,739 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733743635739 2024-12-09T11:26:45,739 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T11:26:45,739 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T11:26:45,739 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T11:26:45,739 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T11:26:45,739 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T11:26:45,739 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T11:26:45,739 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:45,740 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T11:26:45,740 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T11:26:45,740 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T11:26:45,740 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T11:26:45,740 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T11:26:45,741 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(746): ClusterId : a37a32ae-b1b1-4a82-bd11-55fe93e09193 2024-12-09T11:26:45,741 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T11:26:45,741 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743605740,5,FailOnTimeoutGroup] 2024-12-09T11:26:45,742 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743605741,5,FailOnTimeoutGroup] 2024-12-09T11:26:45,742 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:45,742 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T11:26:45,743 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:45,743 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T11:26:45,745 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T11:26:45,745 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T11:26:45,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:26:45,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741831_1007 (size=1321) 2024-12-09T11:26:45,747 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T11:26:45,747 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2 2024-12-09T11:26:45,747 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T11:26:45,748 DEBUG [RS:0;2dff3a36d44f:43063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60670902, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2dff3a36d44f/172.17.0.3:0 2024-12-09T11:26:45,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:26:45,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741832_1008 (size=32) 2024-12-09T11:26:45,764 
DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:26:45,765 DEBUG [RS:0;2dff3a36d44f:43063 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2dff3a36d44f:43063 2024-12-09T11:26:45,765 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T11:26:45,765 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T11:26:45,765 DEBUG [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T11:26:45,766 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(2659): reportForDuty to master=2dff3a36d44f,36503,1733743604948 with port=43063, startcode=1733743605014 2024-12-09T11:26:45,766 DEBUG [RS:0;2dff3a36d44f:43063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T11:26:45,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:26:45,768 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:26:45,768 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:45,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:26:45,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:26:45,770 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:26:45,770 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:45,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:26:45,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:26:45,772 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:26:45,772 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:45,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:26:45,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:26:45,779 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:26:45,779 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50051, version=3.0.0-beta-2-SNAPSHOT, sasl=false, 
ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T11:26:45,779 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:45,780 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36503 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 2dff3a36d44f,43063,1733743605014 2024-12-09T11:26:45,780 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:26:45,780 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36503 {}] master.ServerManager(517): Registering regionserver=2dff3a36d44f,43063,1733743605014 2024-12-09T11:26:45,780 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:26:45,781 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/data/hbase/meta/1588230740 2024-12-09T11:26:45,781 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/data/hbase/meta/1588230740 2024-12-09T11:26:45,782 DEBUG [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2 2024-12-09T11:26:45,782 DEBUG [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40949 2024-12-09T11:26:45,782 DEBUG [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T11:26:45,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:26:45,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:26:45,783 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T11:26:45,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:26:45,784 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:26:45,784 DEBUG [RS:0;2dff3a36d44f:43063 {}] zookeeper.ZKUtil(111): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2dff3a36d44f,43063,1733743605014 2024-12-09T11:26:45,785 WARN [RS:0;2dff3a36d44f:43063 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T11:26:45,785 INFO [RS:0;2dff3a36d44f:43063 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:26:45,785 DEBUG [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/WALs/2dff3a36d44f,43063,1733743605014 2024-12-09T11:26:45,785 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2dff3a36d44f,43063,1733743605014] 2024-12-09T11:26:45,787 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T11:26:45,788 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805380, jitterRate=0.02409440279006958}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:26:45,788 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733743605764Initializing all the Stores at 1733743605765 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743605765Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743605766 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743605766Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743605766Cleaning up temporary data from old regions at 1733743605782 (+16 ms)Region opened successfully at 1733743605788 (+6 ms) 2024-12-09T11:26:45,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:26:45,789 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:26:45,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:26:45,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:26:45,789 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:26:45,789 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T11:26:45,790 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:26:45,791 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743605788Disabling compacts and flushes for region at 1733743605788Disabling writes for close at 1733743605789 (+1 ms)Writing region close event to WAL at 1733743605790 (+1 ms)Closed at 1733743605790 2024-12-09T11:26:45,791 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T11:26:45,791 INFO [RS:0;2dff3a36d44f:43063 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T11:26:45,792 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:45,792 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:26:45,792 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T11:26:45,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T11:26:45,793 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T11:26:45,793 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:26:45,795 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T11:26:45,803 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T11:26:45,803 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2dff3a36d44f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2dff3a36d44f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:26:45,803 DEBUG [RS:0;2dff3a36d44f:43063 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2dff3a36d44f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T11:26:45,805 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:45,805 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:45,805 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:45,805 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T11:26:45,805 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:45,805 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43063,1733743605014-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:26:45,825 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T11:26:45,825 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,43063,1733743605014-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:45,825 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:45,825 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.Replication(171): 2dff3a36d44f,43063,1733743605014 started 2024-12-09T11:26:45,844 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:45,844 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(1482): Serving as 2dff3a36d44f,43063,1733743605014, RpcServer on 2dff3a36d44f/172.17.0.3:43063, sessionid=0x1012aef84e20001 2024-12-09T11:26:45,844 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T11:26:45,844 DEBUG [RS:0;2dff3a36d44f:43063 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2dff3a36d44f,43063,1733743605014 2024-12-09T11:26:45,844 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,43063,1733743605014' 2024-12-09T11:26:45,844 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T11:26:45,845 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T11:26:45,845 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T11:26:45,845 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T11:26:45,845 DEBUG [RS:0;2dff3a36d44f:43063 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2dff3a36d44f,43063,1733743605014 2024-12-09T11:26:45,845 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2dff3a36d44f,43063,1733743605014' 2024-12-09T11:26:45,845 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T11:26:45,845 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T11:26:45,846 DEBUG [RS:0;2dff3a36d44f:43063 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T11:26:45,846 INFO [RS:0;2dff3a36d44f:43063 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T11:26:45,846 INFO [RS:0;2dff3a36d44f:43063 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-09T11:26:45,945 WARN [2dff3a36d44f:36503 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T11:26:45,948 INFO [RS:0;2dff3a36d44f:43063 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C43063%2C1733743605014, suffix=, logDir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/WALs/2dff3a36d44f,43063,1733743605014, archiveDir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/oldWALs, maxLogs=32 2024-12-09T11:26:45,948 INFO [RS:0;2dff3a36d44f:43063 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C43063%2C1733743605014.1733743605948 2024-12-09T11:26:45,954 INFO [RS:0;2dff3a36d44f:43063 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/WALs/2dff3a36d44f,43063,1733743605014/2dff3a36d44f%2C43063%2C1733743605014.1733743605948 2024-12-09T11:26:45,955 DEBUG [RS:0;2dff3a36d44f:43063 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43157:43157),(127.0.0.1/127.0.0.1:42331:42331)] 2024-12-09T11:26:46,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:26:46,171 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T11:26:46,172 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-09T11:26:46,195 DEBUG [2dff3a36d44f:36503 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T11:26:46,196 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2dff3a36d44f,43063,1733743605014 2024-12-09T11:26:46,197 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,43063,1733743605014, state=OPENING 2024-12-09T11:26:46,199 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T11:26:46,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:46,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:46,200 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T11:26:46,201 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=2dff3a36d44f,43063,1733743605014}] 2024-12-09T11:26:46,201 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:26:46,201 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:26:46,353 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T11:26:46,355 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34071, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T11:26:46,359 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T11:26:46,360 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:26:46,362 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2dff3a36d44f%2C43063%2C1733743605014.meta, suffix=.meta, logDir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/WALs/2dff3a36d44f,43063,1733743605014, archiveDir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/oldWALs, maxLogs=32 2024-12-09T11:26:46,363 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 2dff3a36d44f%2C43063%2C1733743605014.meta.1733743606363.meta 2024-12-09T11:26:46,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T11:26:46,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T11:26:46,375 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/WALs/2dff3a36d44f,43063,1733743605014/2dff3a36d44f%2C43063%2C1733743605014.meta.1733743606363.meta 2024-12-09T11:26:46,386 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42331:42331),(127.0.0.1/127.0.0.1:43157:43157)] 2024-12-09T11:26:46,388 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T11:26:46,389 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T11:26:46,389 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T11:26:46,389 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T11:26:46,389 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T11:26:46,389 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T11:26:46,389 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T11:26:46,389 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T11:26:46,390 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T11:26:46,391 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T11:26:46,391 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:46,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:26:46,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T11:26:46,393 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T11:26:46,393 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:46,393 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:26:46,393 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T11:26:46,394 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T11:26:46,394 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:46,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T11:26:46,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T11:26:46,395 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T11:26:46,395 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T11:26:46,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T11:26:46,395 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T11:26:46,396 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/data/hbase/meta/1588230740 2024-12-09T11:26:46,397 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/data/hbase/meta/1588230740 2024-12-09T11:26:46,398 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T11:26:46,398 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T11:26:46,399 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
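(Annotation, not part of the log: the FlushLargeStoresPolicy line above falls back to the region memstore flush size divided by the number of column families because no hbase.hregion.percolumnfamilyflush.size.lower.bound is set for hbase:meta. With the four families seen above (info, ns, rep_barrier, table), the logged 16.0 M implies a 64 MB region flush size in this test configuration; the value below is that inferred assumption, shown only to make the arithmetic explicit.)

public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        long regionMemstoreFlushSize = 64L * 1024 * 1024; // assumed test value implied by the 16.0 M figure
        int columnFamilies = 4;                           // info, ns, rep_barrier, table
        long perFamilyLowerBound = regionMemstoreFlushSize / columnFamilies;
        System.out.println(perFamilyLowerBound / (1024 * 1024) + " MB per family"); // prints 16 MB
    }
}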
2024-12-09T11:26:46,400 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T11:26:46,400 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=864419, jitterRate=0.0991663932800293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T11:26:46,401 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T11:26:46,401 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733743606389Writing region info on filesystem at 1733743606389Initializing all the Stores at 1733743606390 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743606390Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743606390Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733743606390Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733743606390Cleaning up temporary data from old regions at 1733743606398 (+8 ms)Running coprocessor post-open hooks at 1733743606401 (+3 ms)Region opened successfully at 1733743606401 2024-12-09T11:26:46,402 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733743606353 2024-12-09T11:26:46,405 DEBUG [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T11:26:46,405 INFO [RS_OPEN_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T11:26:46,406 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=2dff3a36d44f,43063,1733743605014 2024-12-09T11:26:46,407 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2dff3a36d44f,43063,1733743605014, state=OPEN 2024-12-09T11:26:46,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:26:46,416 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=2dff3a36d44f,43063,1733743605014 2024-12-09T11:26:46,416 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:26:46,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T11:26:46,418 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T11:26:46,426 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T11:26:46,426 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=2dff3a36d44f,43063,1733743605014 in 215 msec 2024-12-09T11:26:46,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T11:26:46,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 634 msec 2024-12-09T11:26:46,438 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T11:26:46,438 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T11:26:46,440 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:26:46,440 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,43063,1733743605014, seqNum=-1] 2024-12-09T11:26:46,440 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:26:46,441 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51175, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:26:46,453 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 726 msec 2024-12-09T11:26:46,454 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733743606454, completionTime=-1 2024-12-09T11:26:46,454 INFO 
[master/2dff3a36d44f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T11:26:46,454 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T11:26:46,462 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T11:26:46,462 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733743666462 2024-12-09T11:26:46,462 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733743726462 2024-12-09T11:26:46,462 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 7 msec 2024-12-09T11:26:46,473 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,36503,1733743604948-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:46,473 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,36503,1733743604948-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:46,473 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,36503,1733743604948-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:46,473 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2dff3a36d44f:36503, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:46,473 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:46,473 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:46,480 DEBUG [master/2dff3a36d44f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T11:26:46,483 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.429sec 2024-12-09T11:26:46,483 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T11:26:46,483 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T11:26:46,483 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T11:26:46,483 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
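(Annotation, not part of the log: the ChoreService lines above register the master's periodic maintenance tasks, e.g. BalancerChore every 300000 ms and HbckChore every 3600000 ms. The sketch below reproduces the same fixed-rate scheduling pattern with plain java.util.concurrent as an analogy only; it does not use HBase's ScheduledChore/ChoreService API, and only the chore names and periods are taken from the log.)

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
        // Analogous to "BalancerChore, period=300000, unit=MILLISECONDS is enabled."
        chores.scheduleAtFixedRate(() -> System.out.println("balancer chore tick"),
            0, 300_000, TimeUnit.MILLISECONDS);
        // Analogous to "HbckChore-, period=3600000, unit=MILLISECONDS is enabled."
        chores.scheduleAtFixedRate(() -> System.out.println("hbck chore tick"),
            0, 3_600_000, TimeUnit.MILLISECONDS);
        TimeUnit.SECONDS.sleep(1); // let the immediate first runs fire once
        chores.shutdownNow();
    }
}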
2024-12-09T11:26:46,483 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T11:26:46,483 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,36503,1733743604948-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T11:26:46,483 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,36503,1733743604948-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T11:26:46,494 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T11:26:46,494 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T11:26:46,494 INFO [master/2dff3a36d44f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2dff3a36d44f,36503,1733743604948-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T11:26:46,541 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16413970, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:26:46,541 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 2dff3a36d44f,36503,-1 for getting cluster id 2024-12-09T11:26:46,541 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T11:26:46,542 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a37a32ae-b1b1-4a82-bd11-55fe93e09193' 2024-12-09T11:26:46,543 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T11:26:46,543 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a37a32ae-b1b1-4a82-bd11-55fe93e09193" 2024-12-09T11:26:46,543 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18f6f19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:26:46,543 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [2dff3a36d44f,36503,-1] 2024-12-09T11:26:46,544 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T11:26:46,544 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:26:46,545 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59142, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T11:26:46,546 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d98d4ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T11:26:46,546 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T11:26:46,547 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=2dff3a36d44f,43063,1733743605014, seqNum=-1] 2024-12-09T11:26:46,548 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T11:26:46,549 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58396, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T11:26:46,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=2dff3a36d44f,36503,1733743604948 2024-12-09T11:26:46,551 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T11:26:46,553 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T11:26:46,554 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T11:26:46,556 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/WALs/test.com,8080,1, archiveDir=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/oldWALs, maxLogs=32 2024-12-09T11:26:46,556 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733743606556 2024-12-09T11:26:46,562 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/WALs/test.com,8080,1/test.com%2C8080%2C1.1733743606556 2024-12-09T11:26:46,565 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43157:43157),(127.0.0.1/127.0.0.1:42331:42331)] 2024-12-09T11:26:46,566 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733743606566 2024-12-09T11:26:46,577 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,577 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,577 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,577 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,577 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,577 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/WALs/test.com,8080,1/test.com%2C8080%2C1.1733743606556 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/WALs/test.com,8080,1/test.com%2C8080%2C1.1733743606566 2024-12-09T11:26:46,581 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43157:43157),(127.0.0.1/127.0.0.1:42331:42331)] 2024-12-09T11:26:46,581 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/WALs/test.com,8080,1/test.com%2C8080%2C1.1733743606556 is not closed yet, will try archiving it next time 2024-12-09T11:26:46,583 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741835_1011 (size=93) 2024-12-09T11:26:46,583 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,583 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,583 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,583 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741835_1011 (size=93) 2024-12-09T11:26:46,584 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/WALs/test.com,8080,1/test.com%2C8080%2C1.1733743606556 to hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/oldWALs/test.com%2C8080%2C1.1733743606556 2024-12-09T11:26:46,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741836_1012 (size=93) 2024-12-09T11:26:46,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741836_1012 (size=93) 2024-12-09T11:26:46,588 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/oldWALs 2024-12-09T11:26:46,588 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733743606566) 2024-12-09T11:26:46,588 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T11:26:46,588 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
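(Annotation, not part of the log: the WAL roll above creates test.com%2C8080%2C1.1733743606566 and archives test.com%2C8080%2C1.1733743606556 to oldWALs. The file names visible in those paths follow a "URL-encoded server name" + "." + "creation timestamp" pattern; the sketch below only reproduces that naming convention as seen in the paths, it is not HBase's WAL code, and the server name and timestamp are copied from the log.)

import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class WalNameSketch {
    public static void main(String[] args) {
        String serverName = "test.com,8080,1";  // as logged in the WALs directory
        long creationTime = 1733743606556L;     // as logged for the first WAL file
        // Commas are percent-encoded, producing the prefix seen in the WAL paths above.
        String prefix = URLEncoder.encode(serverName, StandardCharsets.UTF_8);
        System.out.println(prefix + "." + creationTime); // test.com%2C8080%2C1.1733743606556
    }
}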
2024-12-09T11:26:46,588 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:26:46,588 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:26:46,589 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:26:46,589 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
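(Annotation, not part of the log: the call stack above shows AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster through JUnit 4's RunAfters. A minimal test-class skeleton following that pattern is sketched below; only the HBaseTestingUtil class and the shutdownMiniCluster call appear in the stack trace, while the field name, the @Before method and the startMiniCluster call are assumptions about how such a test is typically set up.)

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class LogRollingTeardownSketch {
    private final HBaseTestingUtil testUtil = new HBaseTestingUtil(); // assumed setup

    @Before
    public void setUp() throws Exception {
        testUtil.startMiniCluster(); // would bring up the master and region server seen above
    }

    @After
    public void tearDown() throws Exception {
        // Matches the shutdownMiniCluster frame in the logged call stack.
        testUtil.shutdownMiniCluster();
    }
}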
2024-12-09T11:26:46,589 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T11:26:46,589 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1930339561, stopped=false 2024-12-09T11:26:46,589 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=2dff3a36d44f,36503,1733743604948 2024-12-09T11:26:46,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:26:46,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T11:26:46,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:46,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:46,590 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:26:46,590 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T11:26:46,591 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:26:46,591 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:26:46,591 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:26:46,591 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '2dff3a36d44f,43063,1733743605014' ***** 2024-12-09T11:26:46,591 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T11:26:46,592 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T11:26:46,592 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T11:26:46,592 INFO [RS:0;2dff3a36d44f:43063 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T11:26:46,592 INFO [RS:0;2dff3a36d44f:43063 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T11:26:46,592 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(959): stopping server 2dff3a36d44f,43063,1733743605014 2024-12-09T11:26:46,592 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:26:46,592 INFO [RS:0;2dff3a36d44f:43063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;2dff3a36d44f:43063. 
2024-12-09T11:26:46,592 DEBUG [RS:0;2dff3a36d44f:43063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T11:26:46,592 DEBUG [RS:0;2dff3a36d44f:43063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:26:46,592 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T11:26:46,594 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T11:26:46,594 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T11:26:46,594 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T11:26:46,594 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T11:26:46,595 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T11:26:46,595 DEBUG [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T11:26:46,595 DEBUG [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T11:26:46,595 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T11:26:46,595 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T11:26:46,595 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T11:26:46,595 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T11:26:46,595 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T11:26:46,595 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-09T11:26:46,615 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/data/hbase/meta/1588230740/.tmp/ns/fc51a944e3e942adac6247c1c7c74d09 is 43, key is default/ns:d/1733743606442/Put/seqid=0 2024-12-09T11:26:46,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741837_1013 (size=5153) 2024-12-09T11:26:46,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741837_1013 (size=5153) 2024-12-09T11:26:46,621 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/data/hbase/meta/1588230740/.tmp/ns/fc51a944e3e942adac6247c1c7c74d09 2024-12-09T11:26:46,626 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/data/hbase/meta/1588230740/.tmp/ns/fc51a944e3e942adac6247c1c7c74d09 as hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/data/hbase/meta/1588230740/ns/fc51a944e3e942adac6247c1c7c74d09 2024-12-09T11:26:46,630 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/data/hbase/meta/1588230740/ns/fc51a944e3e942adac6247c1c7c74d09, entries=2, sequenceid=6, filesize=5.0 K 2024-12-09T11:26:46,631 INFO 
[RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 36ms, sequenceid=6, compaction requested=false 2024-12-09T11:26:46,634 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T11:26:46,635 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T11:26:46,635 INFO [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T11:26:46,635 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733743606595Running coprocessor pre-close hooks at 1733743606595Disabling compacts and flushes for region at 1733743606595Disabling writes for close at 1733743606595Obtaining lock to block concurrent updates at 1733743606595Preparing flush snapshotting stores in 1588230740 at 1733743606595Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733743606595Flushing stores of hbase:meta,,1.1588230740 at 1733743606596 (+1 ms)Flushing 1588230740/ns: creating writer at 1733743606596Flushing 1588230740/ns: appending metadata at 1733743606615 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1733743606615Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20534d27: reopening flushed file at 1733743606625 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 36ms, sequenceid=6, compaction requested=false at 1733743606631 (+6 ms)Writing region close event to WAL at 1733743606632 (+1 ms)Running coprocessor post-close hooks at 1733743606635 (+3 ms)Closed at 1733743606635 2024-12-09T11:26:46,635 DEBUG [RS_CLOSE_META-regionserver/2dff3a36d44f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T11:26:46,795 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(976): stopping server 2dff3a36d44f,43063,1733743605014; all regions closed. 
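(Annotation, not part of the log: the flush of the meta region's ns family above prints its biggest cell as "default/ns:d/1733743606442/Put", i.e. row/family:qualifier/timestamp/type. The client-side sketch below shows how such a cell is expressed as a Put with an explicit timestamp; it targets a hypothetical user table rather than hbase:meta, assumes a reachable cluster, and the value bytes are made up.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CellKeySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes a reachable cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("some_table"))) { // hypothetical table
            // Row "default", family "ns", qualifier "d", explicit timestamp: HBase would
            // print this cell's key as default/ns:d/1733743606442/Put, as in the flush log above.
            Put put = new Put(Bytes.toBytes("default"));
            put.addColumn(Bytes.toBytes("ns"), Bytes.toBytes("d"),
                1733743606442L, Bytes.toBytes("default"));
            table.put(put);
        }
    }
}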
2024-12-09T11:26:46,795 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,795 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,796 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,796 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,796 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741834_1010 (size=1152) 2024-12-09T11:26:46,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741834_1010 (size=1152) 2024-12-09T11:26:46,800 DEBUG [RS:0;2dff3a36d44f:43063 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/oldWALs 2024-12-09T11:26:46,800 INFO [RS:0;2dff3a36d44f:43063 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C43063%2C1733743605014.meta:.meta(num 1733743606363) 2024-12-09T11:26:46,800 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,800 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,800 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,801 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,801 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T11:26:46,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741833_1009 (size=93) 2024-12-09T11:26:46,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741833_1009 (size=93) 2024-12-09T11:26:46,804 DEBUG [RS:0;2dff3a36d44f:43063 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/oldWALs 2024-12-09T11:26:46,804 INFO [RS:0;2dff3a36d44f:43063 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 2dff3a36d44f%2C43063%2C1733743605014:(num 1733743605948) 2024-12-09T11:26:46,804 DEBUG [RS:0;2dff3a36d44f:43063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T11:26:46,804 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T11:26:46,804 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:26:46,804 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.ChoreService(370): Chore service for: regionserver/2dff3a36d44f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T11:26:46,804 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:26:46,805 INFO [regionserver/2dff3a36d44f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T11:26:46,805 INFO [RS:0;2dff3a36d44f:43063 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43063 2024-12-09T11:26:46,806 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T11:26:46,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T11:26:46,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2dff3a36d44f,43063,1733743605014 2024-12-09T11:26:46,808 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2dff3a36d44f,43063,1733743605014] 2024-12-09T11:26:46,809 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/2dff3a36d44f,43063,1733743605014 already deleted, retry=false 2024-12-09T11:26:46,809 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 2dff3a36d44f,43063,1733743605014 expired; onlineServers=0 2024-12-09T11:26:46,809 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '2dff3a36d44f,36503,1733743604948' ***** 2024-12-09T11:26:46,809 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T11:26:46,809 INFO [M:0;2dff3a36d44f:36503 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T11:26:46,809 INFO [M:0;2dff3a36d44f:36503 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T11:26:46,810 DEBUG [M:0;2dff3a36d44f:36503 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T11:26:46,810 DEBUG [M:0;2dff3a36d44f:36503 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T11:26:46,810 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T11:26:46,810 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743605741 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.small.0-1733743605741,5,FailOnTimeoutGroup] 2024-12-09T11:26:46,810 DEBUG [master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743605740 {}] cleaner.HFileCleaner(306): Exit Thread[master/2dff3a36d44f:0:becomeActiveMaster-HFileCleaner.large.0-1733743605740,5,FailOnTimeoutGroup] 2024-12-09T11:26:46,810 INFO [M:0;2dff3a36d44f:36503 {}] hbase.ChoreService(370): Chore service for: master/2dff3a36d44f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T11:26:46,810 INFO [M:0;2dff3a36d44f:36503 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T11:26:46,810 DEBUG [M:0;2dff3a36d44f:36503 {}] master.HMaster(1795): Stopping service threads 2024-12-09T11:26:46,810 INFO [M:0;2dff3a36d44f:36503 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T11:26:46,810 INFO [M:0;2dff3a36d44f:36503 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T11:26:46,810 INFO [M:0;2dff3a36d44f:36503 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T11:26:46,810 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T11:26:46,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T11:26:46,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T11:26:46,811 DEBUG [M:0;2dff3a36d44f:36503 {}] zookeeper.ZKUtil(347): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T11:26:46,811 WARN [M:0;2dff3a36d44f:36503 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T11:26:46,812 INFO [M:0;2dff3a36d44f:36503 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/.lastflushedseqids 2024-12-09T11:26:46,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741838_1014 (size=99) 2024-12-09T11:26:46,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741838_1014 (size=99) 2024-12-09T11:26:46,817 INFO [M:0;2dff3a36d44f:36503 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T11:26:46,817 INFO [M:0;2dff3a36d44f:36503 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T11:26:46,817 DEBUG [M:0;2dff3a36d44f:36503 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T11:26:46,817 INFO [M:0;2dff3a36d44f:36503 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:26:46,817 DEBUG [M:0;2dff3a36d44f:36503 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:26:46,817 DEBUG [M:0;2dff3a36d44f:36503 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T11:26:46,817 DEBUG [M:0;2dff3a36d44f:36503 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T11:26:46,817 INFO [M:0;2dff3a36d44f:36503 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-09T11:26:46,832 DEBUG [M:0;2dff3a36d44f:36503 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b41f345e06de42048d73f098f7c1fd6e is 82, key is hbase:meta,,1/info:regioninfo/1733743606406/Put/seqid=0 2024-12-09T11:26:46,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741839_1015 (size=5672) 2024-12-09T11:26:46,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741839_1015 (size=5672) 2024-12-09T11:26:46,837 INFO [M:0;2dff3a36d44f:36503 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b41f345e06de42048d73f098f7c1fd6e 2024-12-09T11:26:46,881 DEBUG [M:0;2dff3a36d44f:36503 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b367e6209d3d4d319640098d51cf125e is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733743606453/Put/seqid=0 2024-12-09T11:26:46,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741840_1016 (size=5275) 2024-12-09T11:26:46,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741840_1016 (size=5275) 2024-12-09T11:26:46,886 INFO [M:0;2dff3a36d44f:36503 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b367e6209d3d4d319640098d51cf125e 2024-12-09T11:26:46,909 INFO [RS:0;2dff3a36d44f:43063 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T11:26:46,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T11:26:46,909 INFO [RS:0;2dff3a36d44f:43063 {}] regionserver.HRegionServer(1031): Exiting; stopping=2dff3a36d44f,43063,1733743605014; zookeeper connection closed. 
2024-12-09T11:26:46,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43063-0x1012aef84e20001, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T11:26:46,909 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5c2b1dba {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5c2b1dba
2024-12-09T11:26:46,909 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-09T11:26:46,912 DEBUG [M:0;2dff3a36d44f:36503 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c6cbb1639806441d8b3353ad9fe7a447 is 69, key is 2dff3a36d44f,43063,1733743605014/rs:state/1733743605780/Put/seqid=0
2024-12-09T11:26:46,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741841_1017 (size=5156)
2024-12-09T11:26:46,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741841_1017 (size=5156)
2024-12-09T11:26:46,926 INFO [M:0;2dff3a36d44f:36503 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c6cbb1639806441d8b3353ad9fe7a447
2024-12-09T11:26:46,952 DEBUG [M:0;2dff3a36d44f:36503 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6dfb0a26279148a5954dc664d1c389e7 is 52, key is load_balancer_on/state:d/1733743606552/Put/seqid=0
2024-12-09T11:26:46,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741842_1018 (size=5056)
2024-12-09T11:26:46,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741842_1018 (size=5056)
2024-12-09T11:26:46,961 INFO [M:0;2dff3a36d44f:36503 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6dfb0a26279148a5954dc664d1c389e7
2024-12-09T11:26:46,968 DEBUG [M:0;2dff3a36d44f:36503 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b41f345e06de42048d73f098f7c1fd6e as hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b41f345e06de42048d73f098f7c1fd6e
2024-12-09T11:26:46,973 INFO [M:0;2dff3a36d44f:36503 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b41f345e06de42048d73f098f7c1fd6e, entries=8, sequenceid=29, filesize=5.5 K
2024-12-09T11:26:46,975 DEBUG [M:0;2dff3a36d44f:36503 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b367e6209d3d4d319640098d51cf125e as hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b367e6209d3d4d319640098d51cf125e
2024-12-09T11:26:46,980 INFO [M:0;2dff3a36d44f:36503 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b367e6209d3d4d319640098d51cf125e, entries=3, sequenceid=29, filesize=5.2 K
2024-12-09T11:26:46,981 DEBUG [M:0;2dff3a36d44f:36503 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c6cbb1639806441d8b3353ad9fe7a447 as hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c6cbb1639806441d8b3353ad9fe7a447
2024-12-09T11:26:46,985 INFO [M:0;2dff3a36d44f:36503 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c6cbb1639806441d8b3353ad9fe7a447, entries=1, sequenceid=29, filesize=5.0 K
2024-12-09T11:26:46,986 DEBUG [M:0;2dff3a36d44f:36503 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6dfb0a26279148a5954dc664d1c389e7 as hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6dfb0a26279148a5954dc664d1c389e7
2024-12-09T11:26:46,990 INFO [M:0;2dff3a36d44f:36503 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40949/user/jenkins/test-data/acb60ebc-4d3c-4a35-c2fa-a364879fecf2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6dfb0a26279148a5954dc664d1c389e7, entries=1, sequenceid=29, filesize=4.9 K
2024-12-09T11:26:46,995 INFO [M:0;2dff3a36d44f:36503 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 178ms, sequenceid=29, compaction requested=false
2024-12-09T11:26:46,998 INFO [M:0;2dff3a36d44f:36503 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T11:26:46,998 DEBUG [M:0;2dff3a36d44f:36503 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733743606817Disabling compacts and flushes for region at 1733743606817Disabling writes for close at 1733743606817Obtaining lock to block concurrent updates at 1733743606817Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733743606817Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733743606818 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733743606818Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733743606818Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733743606832 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733743606832Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733743606841 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733743606881 (+40 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733743606881Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733743606891 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733743606912 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733743606912Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733743606933 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733743606952 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733743606952Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b11ac0b: reopening flushed file at 1733743606967 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ce40abb: reopening flushed file at 1733743606973 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@88f14d0: reopening flushed file at 1733743606980 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@90e9f6: reopening flushed file at 1733743606985 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 178ms, sequenceid=29, compaction requested=false at 1733743606995 (+10 ms)Writing region close event to WAL at 1733743606997 (+2 ms)Closed at 1733743606997
2024-12-09T11:26:46,998 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:26:46,998 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:26:46,998 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:26:46,998 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:26:47,002 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T11:26:47,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34091 is added to blk_1073741830_1006 (size=10311)
2024-12-09T11:26:47,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33275 is added to blk_1073741830_1006 (size=10311)
2024-12-09T11:26:47,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42933,1733743388938/2dff3a36d44f%2C42933%2C1733743388938.meta.1733743389925.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T11:26:47,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42671/user/jenkins/test-data/fca1427a-49c7-7dc2-a13e-ea0dcfece95e/WALs/2dff3a36d44f,42489,1733743390184/2dff3a36d44f%2C42489%2C1733743390184.1733743390374
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T11:26:47,407 INFO [M:0;2dff3a36d44f:36503 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T11:26:47,407 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T11:26:47,407 INFO [M:0;2dff3a36d44f:36503 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36503
2024-12-09T11:26:47,408 INFO [M:0;2dff3a36d44f:36503 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T11:26:47,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T11:26:47,511 INFO [M:0;2dff3a36d44f:36503 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T11:26:47,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36503-0x1012aef84e20000, quorum=127.0.0.1:57296, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T11:26:47,514 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d371f78{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T11:26:47,514 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34c62d4f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T11:26:47,514 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T11:26:47,515 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@244b5b56{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T11:26:47,515 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a4842da{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/hadoop.log.dir/,STOPPED}
2024-12-09T11:26:47,523 WARN [BP-1748757753-172.17.0.3-1733743604136 heartbeating to localhost/127.0.0.1:40949 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T11:26:47,523 WARN [BP-1748757753-172.17.0.3-1733743604136 heartbeating to localhost/127.0.0.1:40949 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1748757753-172.17.0.3-1733743604136 (Datanode Uuid 6a0b215d-bc80-4a03-b7d7-12ba07a91ae3) service to localhost/127.0.0.1:40949
2024-12-09T11:26:47,524 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T11:26:47,526 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/cluster_ce936459-8ecc-420a-9516-854003af5727/data/data4/current/BP-1748757753-172.17.0.3-1733743604136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T11:26:47,526 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/cluster_ce936459-8ecc-420a-9516-854003af5727/data/data3/current/BP-1748757753-172.17.0.3-1733743604136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T11:26:47,526 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T11:26:47,526 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T11:26:47,534 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59a2b0cf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T11:26:47,534 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3893ee93{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T11:26:47,534 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T11:26:47,534 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7095e0d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T11:26:47,534 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@729fe98a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/hadoop.log.dir/,STOPPED}
2024-12-09T11:26:47,541 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T11:26:47,541 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T11:26:47,542 WARN [BP-1748757753-172.17.0.3-1733743604136 heartbeating to localhost/127.0.0.1:40949 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T11:26:47,542 WARN [BP-1748757753-172.17.0.3-1733743604136 heartbeating to localhost/127.0.0.1:40949 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1748757753-172.17.0.3-1733743604136 (Datanode Uuid 79f93338-c0c4-4f22-acd6-14a13a4855aa) service to localhost/127.0.0.1:40949
2024-12-09T11:26:47,543 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/cluster_ce936459-8ecc-420a-9516-854003af5727/data/data1/current/BP-1748757753-172.17.0.3-1733743604136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T11:26:47,543 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/cluster_ce936459-8ecc-420a-9516-854003af5727/data/data2/current/BP-1748757753-172.17.0.3-1733743604136 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T11:26:47,543 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T11:26:47,549 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@285ac396{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T11:26:47,550 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54fed95e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T11:26:47,550 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T11:26:47,550 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45e45587{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T11:26:47,550 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bb2f36a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95966128-4ea4-4cbf-7c7d-ee5f7c56d964/hadoop.log.dir/,STOPPED}
2024-12-09T11:26:47,556 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T11:26:47,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T11:26:47,581 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 227)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40949
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:40949 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:40949
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-6
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:40949
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40949
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:40949 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1609174458) connection to localhost/127.0.0.1:40949 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40949
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=534 (was 503) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=421 (was 379) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=664 (was 791)